1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "reload.h"
42 #include "diagnostic-core.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "debug.h"
48 #include "langhooks.h"
49 #include "optabs.h"
50 #include "gimple.h"
51 #include "df.h"
52 #include "params.h"
53 #include "cfgloop.h"
54 #include "opts.h"
56 /* Define the specific costs for a given cpu. */
58 struct processor_costs
60 /* multiplication */
61 const int m; /* cost of an M instruction. */
62 const int mghi; /* cost of an MGHI instruction. */
63 const int mh; /* cost of an MH instruction. */
64 const int mhi; /* cost of an MHI instruction. */
65 const int ml; /* cost of an ML instruction. */
66 const int mr; /* cost of an MR instruction. */
67 const int ms; /* cost of an MS instruction. */
68 const int msg; /* cost of an MSG instruction. */
69 const int msgf; /* cost of an MSGF instruction. */
70 const int msgfr; /* cost of an MSGFR instruction. */
71 const int msgr; /* cost of an MSGR instruction. */
72 const int msr; /* cost of an MSR instruction. */
73 const int mult_df; /* cost of multiplication in DFmode. */
74 const int mxbr;
75 /* square root */
76 const int sqxbr; /* cost of square root in TFmode. */
77 const int sqdbr; /* cost of square root in DFmode. */
78 const int sqebr; /* cost of square root in SFmode. */
79 /* multiply and add */
80 const int madbr; /* cost of multiply and add in DFmode. */
81 const int maebr; /* cost of multiply and add in SFmode. */
82 /* division */
83 const int dxbr;
84 const int ddbr;
85 const int debr;
86 const int dlgr;
87 const int dlr;
88 const int dr;
89 const int dsgfr;
90 const int dsgr;
93 const struct processor_costs *s390_cost;
95 static const
96 struct processor_costs z900_cost =
98 COSTS_N_INSNS (5), /* M */
99 COSTS_N_INSNS (10), /* MGHI */
100 COSTS_N_INSNS (5), /* MH */
101 COSTS_N_INSNS (4), /* MHI */
102 COSTS_N_INSNS (5), /* ML */
103 COSTS_N_INSNS (5), /* MR */
104 COSTS_N_INSNS (4), /* MS */
105 COSTS_N_INSNS (15), /* MSG */
106 COSTS_N_INSNS (7), /* MSGF */
107 COSTS_N_INSNS (7), /* MSGFR */
108 COSTS_N_INSNS (10), /* MSGR */
109 COSTS_N_INSNS (4), /* MSR */
110 COSTS_N_INSNS (7), /* multiplication in DFmode */
111 COSTS_N_INSNS (13), /* MXBR */
112 COSTS_N_INSNS (136), /* SQXBR */
113 COSTS_N_INSNS (44), /* SQDBR */
114 COSTS_N_INSNS (35), /* SQEBR */
115 COSTS_N_INSNS (18), /* MADBR */
116 COSTS_N_INSNS (13), /* MAEBR */
117 COSTS_N_INSNS (134), /* DXBR */
118 COSTS_N_INSNS (30), /* DDBR */
119 COSTS_N_INSNS (27), /* DEBR */
120 COSTS_N_INSNS (220), /* DLGR */
121 COSTS_N_INSNS (34), /* DLR */
122 COSTS_N_INSNS (34), /* DR */
123 COSTS_N_INSNS (32), /* DSGFR */
124 COSTS_N_INSNS (32), /* DSGR */
127 static const
128 struct processor_costs z990_cost =
130 COSTS_N_INSNS (4), /* M */
131 COSTS_N_INSNS (2), /* MGHI */
132 COSTS_N_INSNS (2), /* MH */
133 COSTS_N_INSNS (2), /* MHI */
134 COSTS_N_INSNS (4), /* ML */
135 COSTS_N_INSNS (4), /* MR */
136 COSTS_N_INSNS (5), /* MS */
137 COSTS_N_INSNS (6), /* MSG */
138 COSTS_N_INSNS (4), /* MSGF */
139 COSTS_N_INSNS (4), /* MSGFR */
140 COSTS_N_INSNS (4), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (1), /* multiplication in DFmode */
143 COSTS_N_INSNS (28), /* MXBR */
144 COSTS_N_INSNS (130), /* SQXBR */
145 COSTS_N_INSNS (66), /* SQDBR */
146 COSTS_N_INSNS (38), /* SQEBR */
147 COSTS_N_INSNS (1), /* MADBR */
148 COSTS_N_INSNS (1), /* MAEBR */
149 COSTS_N_INSNS (60), /* DXBR */
150 COSTS_N_INSNS (40), /* DDBR */
151 COSTS_N_INSNS (26), /* DEBR */
152 COSTS_N_INSNS (176), /* DLGR */
153 COSTS_N_INSNS (31), /* DLR */
154 COSTS_N_INSNS (31), /* DR */
155 COSTS_N_INSNS (31), /* DSGFR */
156 COSTS_N_INSNS (31), /* DSGR */
159 static const
160 struct processor_costs z9_109_cost =
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (30), /* DLGR */
185 COSTS_N_INSNS (23), /* DLR */
186 COSTS_N_INSNS (23), /* DR */
187 COSTS_N_INSNS (24), /* DSGFR */
188 COSTS_N_INSNS (24), /* DSGR */
191 static const
192 struct processor_costs z10_cost =
194 COSTS_N_INSNS (10), /* M */
195 COSTS_N_INSNS (10), /* MGHI */
196 COSTS_N_INSNS (10), /* MH */
197 COSTS_N_INSNS (10), /* MHI */
198 COSTS_N_INSNS (10), /* ML */
199 COSTS_N_INSNS (10), /* MR */
200 COSTS_N_INSNS (10), /* MS */
201 COSTS_N_INSNS (10), /* MSG */
202 COSTS_N_INSNS (10), /* MSGF */
203 COSTS_N_INSNS (10), /* MSGFR */
204 COSTS_N_INSNS (10), /* MSGR */
205 COSTS_N_INSNS (10), /* MSR */
206 COSTS_N_INSNS (1) , /* multiplication in DFmode */
207 COSTS_N_INSNS (50), /* MXBR */
208 COSTS_N_INSNS (120), /* SQXBR */
209 COSTS_N_INSNS (52), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (111), /* DXBR */
214 COSTS_N_INSNS (39), /* DDBR */
215 COSTS_N_INSNS (32), /* DEBR */
216 COSTS_N_INSNS (160), /* DLGR */
217 COSTS_N_INSNS (71), /* DLR */
218 COSTS_N_INSNS (71), /* DR */
219 COSTS_N_INSNS (71), /* DSGFR */
220 COSTS_N_INSNS (71), /* DSGR */
223 static const
224 struct processor_costs z196_cost =
226 COSTS_N_INSNS (7), /* M */
227 COSTS_N_INSNS (5), /* MGHI */
228 COSTS_N_INSNS (5), /* MH */
229 COSTS_N_INSNS (5), /* MHI */
230 COSTS_N_INSNS (7), /* ML */
231 COSTS_N_INSNS (7), /* MR */
232 COSTS_N_INSNS (6), /* MS */
233 COSTS_N_INSNS (8), /* MSG */
234 COSTS_N_INSNS (6), /* MSGF */
235 COSTS_N_INSNS (6), /* MSGFR */
236 COSTS_N_INSNS (8), /* MSGR */
237 COSTS_N_INSNS (6), /* MSR */
238 COSTS_N_INSNS (1) , /* multiplication in DFmode */
239 COSTS_N_INSNS (40), /* MXBR B+40 */
240 COSTS_N_INSNS (100), /* SQXBR B+100 */
241 COSTS_N_INSNS (42), /* SQDBR B+42 */
242 COSTS_N_INSNS (28), /* SQEBR B+28 */
243 COSTS_N_INSNS (1), /* MADBR B */
244 COSTS_N_INSNS (1), /* MAEBR B */
245 COSTS_N_INSNS (101), /* DXBR B+101 */
246 COSTS_N_INSNS (29), /* DDBR */
247 COSTS_N_INSNS (22), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR cracked */
249 COSTS_N_INSNS (160), /* DLR cracked */
250 COSTS_N_INSNS (160), /* DR expanded */
251 COSTS_N_INSNS (160), /* DSGFR cracked */
252 COSTS_N_INSNS (160), /* DSGR cracked */
255 static const
256 struct processor_costs zEC12_cost =
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
270 COSTS_N_INSNS (1) , /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (131), /* DXBR B+131 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
287 extern int reload_completed;
289 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
290 static rtx last_scheduled_insn;
 292 /* Structure used to hold the components of an S/390 memory
293 address. A legitimate address on S/390 is of the general
294 form
295 base + index + displacement
296 where any of the components is optional.
298 base and index are registers of the class ADDR_REGS,
299 displacement is an unsigned 12-bit immediate constant. */
301 struct s390_address
303 rtx base;
304 rtx indx;
305 rtx disp;
306 bool pointer;
307 bool literal_pool;
310 /* The following structure is embedded in the machine
311 specific part of struct function. */
313 struct GTY (()) s390_frame_layout
315 /* Offset within stack frame. */
316 HOST_WIDE_INT gprs_offset;
317 HOST_WIDE_INT f0_offset;
318 HOST_WIDE_INT f4_offset;
319 HOST_WIDE_INT f8_offset;
320 HOST_WIDE_INT backchain_offset;
 322 /* Numbers of the first and last GPRs for which slots in the
 323 register save area are reserved. */
324 int first_save_gpr_slot;
325 int last_save_gpr_slot;
 327 /* Numbers of the first and last GPRs to be saved and restored. */
328 int first_save_gpr;
329 int first_restore_gpr;
330 int last_save_gpr;
331 int last_restore_gpr;
 333 /* Bits standing for floating point registers. A bit is set if the
 334 respective register has to be saved, starting with reg 16 (f0)
 335 at the rightmost bit.
336 Bit 15 - 8 7 6 5 4 3 2 1 0
337 fpr 15 - 8 7 5 3 1 6 4 2 0
338 reg 31 - 24 23 22 21 20 19 18 17 16 */
339 unsigned int fpr_bitmap;
341 /* Number of floating point registers f8-f15 which must be saved. */
342 int high_fprs;
344 /* Set if return address needs to be saved.
345 This flag is set by s390_return_addr_rtx if it could not use
 346 the initial value of r14 and therefore depends on r14 being
 347 saved to the stack. */
348 bool save_return_addr_p;
350 /* Size of stack frame. */
351 HOST_WIDE_INT frame_size;
354 /* Define the structure for the machine field in struct function. */
356 struct GTY(()) machine_function
358 struct s390_frame_layout frame_layout;
360 /* Literal pool base register. */
361 rtx base_reg;
363 /* True if we may need to perform branch splitting. */
364 bool split_branches_pending_p;
366 /* Some local-dynamic TLS symbol name. */
367 const char *some_ld_name;
369 bool has_landing_pad_p;
 372 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
374 #define cfun_frame_layout (cfun->machine->frame_layout)
375 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
376 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
377 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
378 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
379 (1 << (BITNUM)))
380 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
381 (1 << (BITNUM))))
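/* Illustrative note: with the fpr_bitmap layout described in
   s390_frame_layout above, bit 0 corresponds to f0 (hard reg 16), so
   cfun_set_fpr_bit (0) marks f0 as needing a save slot and
   cfun_fpr_bit_p (0) subsequently yields true.  */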
383 /* Number of GPRs and FPRs used for argument passing. */
384 #define GP_ARG_NUM_REG 5
385 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
387 /* A couple of shortcuts. */
388 #define CONST_OK_FOR_J(x) \
389 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
390 #define CONST_OK_FOR_K(x) \
391 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
392 #define CONST_OK_FOR_Os(x) \
393 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
394 #define CONST_OK_FOR_Op(x) \
395 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
396 #define CONST_OK_FOR_On(x) \
397 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
399 #define REGNO_PAIR_OK(REGNO, MODE) \
400 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
 402 /* The read-ahead of the dynamic branch prediction unit, in
 403 bytes, on a z10 (or higher) CPU. */
404 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
406 /* Return the alignment for LABEL. We default to the -falign-labels
407 value except for the literal pool base label. */
409 s390_label_align (rtx label)
411 rtx prev_insn = prev_active_insn (label);
413 if (prev_insn == NULL_RTX)
414 goto old;
416 prev_insn = single_set (prev_insn);
418 if (prev_insn == NULL_RTX)
419 goto old;
421 prev_insn = SET_SRC (prev_insn);
423 /* Don't align literal pool base labels. */
424 if (GET_CODE (prev_insn) == UNSPEC
425 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
426 return 0;
428 old:
429 return align_labels_log;
432 static enum machine_mode
433 s390_libgcc_cmp_return_mode (void)
435 return TARGET_64BIT ? DImode : SImode;
438 static enum machine_mode
439 s390_libgcc_shift_count_mode (void)
441 return TARGET_64BIT ? DImode : SImode;
444 static enum machine_mode
445 s390_unwind_word_mode (void)
447 return TARGET_64BIT ? DImode : SImode;
450 /* Return true if the back end supports mode MODE. */
451 static bool
452 s390_scalar_mode_supported_p (enum machine_mode mode)
 454 /* In contrast to the default implementation, reject TImode constants on
 455 31-bit TARGET_ZARCH for ABI compliance. */
456 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
457 return false;
459 if (DECIMAL_FLOAT_MODE_P (mode))
460 return default_decimal_float_supported_p ();
462 return default_scalar_mode_supported_p (mode);
465 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
467 void
468 s390_set_has_landing_pad_p (bool value)
470 cfun->machine->has_landing_pad_p = value;
473 /* If two condition code modes are compatible, return a condition code
474 mode which is compatible with both. Otherwise, return
475 VOIDmode. */
477 static enum machine_mode
478 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
480 if (m1 == m2)
481 return m1;
483 switch (m1)
485 case CCZmode:
486 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
487 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
488 return m2;
489 return VOIDmode;
491 case CCSmode:
492 case CCUmode:
493 case CCTmode:
494 case CCSRmode:
495 case CCURmode:
496 case CCZ1mode:
497 if (m2 == CCZmode)
498 return m1;
500 return VOIDmode;
502 default:
503 return VOIDmode;
505 return VOIDmode;
508 /* Return true if SET either doesn't set the CC register, or else
509 the source and destination have matching CC modes and that
510 CC mode is at least as constrained as REQ_MODE. */
512 static bool
513 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
515 enum machine_mode set_mode;
517 gcc_assert (GET_CODE (set) == SET);
519 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
520 return 1;
522 set_mode = GET_MODE (SET_DEST (set));
523 switch (set_mode)
525 case CCSmode:
526 case CCSRmode:
527 case CCUmode:
528 case CCURmode:
529 case CCLmode:
530 case CCL1mode:
531 case CCL2mode:
532 case CCL3mode:
533 case CCT1mode:
534 case CCT2mode:
535 case CCT3mode:
536 if (req_mode != set_mode)
537 return 0;
538 break;
540 case CCZmode:
541 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
542 && req_mode != CCSRmode && req_mode != CCURmode)
543 return 0;
544 break;
546 case CCAPmode:
547 case CCANmode:
548 if (req_mode != CCAmode)
549 return 0;
550 break;
552 default:
553 gcc_unreachable ();
556 return (GET_MODE (SET_SRC (set)) == set_mode);
559 /* Return true if every SET in INSN that sets the CC register
560 has source and destination with matching CC modes and that
561 CC mode is at least as constrained as REQ_MODE.
562 If REQ_MODE is VOIDmode, always return false. */
564 bool
565 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
567 int i;
569 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
570 if (req_mode == VOIDmode)
571 return false;
573 if (GET_CODE (PATTERN (insn)) == SET)
574 return s390_match_ccmode_set (PATTERN (insn), req_mode);
576 if (GET_CODE (PATTERN (insn)) == PARALLEL)
577 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
579 rtx set = XVECEXP (PATTERN (insn), 0, i);
580 if (GET_CODE (set) == SET)
581 if (!s390_match_ccmode_set (set, req_mode))
582 return false;
585 return true;
588 /* If a test-under-mask instruction can be used to implement
589 (compare (and ... OP1) OP2), return the CC mode required
590 to do that. Otherwise, return VOIDmode.
591 MIXED is true if the instruction can distinguish between
 592 CC1 and CC2 for mixed selected bits (TMxx); it is false
593 if the instruction cannot (TM). */
595 enum machine_mode
596 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
598 int bit0, bit1;
600 /* ??? Fixme: should work on CONST_DOUBLE as well. */
601 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
602 return VOIDmode;
604 /* Selected bits all zero: CC0.
605 e.g.: int a; if ((a & (16 + 128)) == 0) */
606 if (INTVAL (op2) == 0)
607 return CCTmode;
609 /* Selected bits all one: CC3.
610 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
611 if (INTVAL (op2) == INTVAL (op1))
612 return CCT3mode;
614 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
615 int a;
616 if ((a & (16 + 128)) == 16) -> CCT1
617 if ((a & (16 + 128)) == 128) -> CCT2 */
618 if (mixed)
620 bit1 = exact_log2 (INTVAL (op2));
621 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
622 if (bit0 != -1 && bit1 != -1)
623 return bit0 > bit1 ? CCT1mode : CCT2mode;
626 return VOIDmode;
 629 /* Given a comparison code CODE (EQ, NE, etc.) and the operands
630 OP0 and OP1 of a COMPARE, return the mode to be used for the
631 comparison. */
633 enum machine_mode
634 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
636 switch (code)
638 case EQ:
639 case NE:
640 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
641 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
642 return CCAPmode;
643 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
644 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
645 return CCAPmode;
646 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
647 || GET_CODE (op1) == NEG)
648 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
649 return CCLmode;
651 if (GET_CODE (op0) == AND)
653 /* Check whether we can potentially do it via TM. */
654 enum machine_mode ccmode;
655 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
656 if (ccmode != VOIDmode)
658 /* Relax CCTmode to CCZmode to allow fall-back to AND
659 if that turns out to be beneficial. */
660 return ccmode == CCTmode ? CCZmode : ccmode;
664 if (register_operand (op0, HImode)
665 && GET_CODE (op1) == CONST_INT
666 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
667 return CCT3mode;
668 if (register_operand (op0, QImode)
669 && GET_CODE (op1) == CONST_INT
670 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
671 return CCT3mode;
673 return CCZmode;
675 case LE:
676 case LT:
677 case GE:
678 case GT:
 679 /* The only overflow condition of NEG and ABS happens when
 680 INT_MIN is used as the operand: the mathematically positive result
 681 cannot be represented and wraps back to a negative value.
682 Using CCAP mode the resulting cc can be used for comparisons. */
683 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCAPmode;
687 /* If constants are involved in an add instruction it is possible to use
688 the resulting cc for comparisons with zero. Knowing the sign of the
 689 constant, the overflow behavior becomes predictable. E.g.:
690 int a, b; if ((b = a + c) > 0)
691 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
692 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
693 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
694 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
695 /* Avoid INT32_MIN on 32 bit. */
696 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
698 if (INTVAL (XEXP((op0), 1)) < 0)
699 return CCANmode;
700 else
701 return CCAPmode;
703 /* Fall through. */
704 case UNORDERED:
705 case ORDERED:
706 case UNEQ:
707 case UNLE:
708 case UNLT:
709 case UNGE:
710 case UNGT:
711 case LTGT:
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCSRmode;
715 return CCSmode;
717 case LTU:
718 case GEU:
719 if (GET_CODE (op0) == PLUS
720 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
721 return CCL1mode;
723 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
724 && GET_CODE (op1) != CONST_INT)
725 return CCURmode;
726 return CCUmode;
728 case LEU:
729 case GTU:
730 if (GET_CODE (op0) == MINUS
731 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
732 return CCL2mode;
734 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
735 && GET_CODE (op1) != CONST_INT)
736 return CCURmode;
737 return CCUmode;
739 default:
740 gcc_unreachable ();
744 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
745 that we can implement more efficiently. */
747 static void
748 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
749 bool op0_preserve_value)
751 if (op0_preserve_value)
752 return;
754 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == ZERO_EXTRACT
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
760 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
762 rtx inner = XEXP (*op0, 0);
763 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
764 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
765 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
767 if (len > 0 && len < modesize
768 && pos >= 0 && pos + len <= modesize
769 && modesize <= HOST_BITS_PER_WIDE_INT)
771 unsigned HOST_WIDE_INT block;
772 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
773 block <<= modesize - pos - len;
775 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
776 gen_int_mode (block, GET_MODE (inner)));
780 /* Narrow AND of memory against immediate to enable TM. */
781 if ((*code == EQ || *code == NE)
782 && *op1 == const0_rtx
783 && GET_CODE (*op0) == AND
784 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
785 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
787 rtx inner = XEXP (*op0, 0);
788 rtx mask = XEXP (*op0, 1);
790 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
791 if (GET_CODE (inner) == SUBREG
792 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
793 && (GET_MODE_SIZE (GET_MODE (inner))
794 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
795 && ((INTVAL (mask)
796 & GET_MODE_MASK (GET_MODE (inner))
797 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
798 == 0))
799 inner = SUBREG_REG (inner);
801 /* Do not change volatile MEMs. */
802 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
804 int part = s390_single_part (XEXP (*op0, 1),
805 GET_MODE (inner), QImode, 0);
806 if (part >= 0)
808 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
809 inner = adjust_address_nv (inner, QImode, part);
810 *op0 = gen_rtx_AND (QImode, inner, mask);
815 /* Narrow comparisons against 0xffff to HImode if possible. */
816 if ((*code == EQ || *code == NE)
817 && GET_CODE (*op1) == CONST_INT
818 && INTVAL (*op1) == 0xffff
819 && SCALAR_INT_MODE_P (GET_MODE (*op0))
820 && (nonzero_bits (*op0, GET_MODE (*op0))
821 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
823 *op0 = gen_lowpart (HImode, *op0);
824 *op1 = constm1_rtx;
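/* Note: after the narrowing, comparing the HImode lowpart against
   constm1_rtx tests the same bit pattern, since -1 in HImode is
   0xffff.  */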
827 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
828 if (GET_CODE (*op0) == UNSPEC
829 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
830 && XVECLEN (*op0, 0) == 1
831 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
832 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
833 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
834 && *op1 == const0_rtx)
836 enum rtx_code new_code = UNKNOWN;
837 switch (*code)
839 case EQ: new_code = EQ; break;
840 case NE: new_code = NE; break;
841 case LT: new_code = GTU; break;
842 case GT: new_code = LTU; break;
843 case LE: new_code = GEU; break;
844 case GE: new_code = LEU; break;
845 default: break;
848 if (new_code != UNKNOWN)
850 *op0 = XVECEXP (*op0, 0, 0);
851 *code = new_code;
855 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
856 if (GET_CODE (*op0) == UNSPEC
857 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
858 && XVECLEN (*op0, 0) == 1
859 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
860 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
861 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
862 && *op1 == const0_rtx)
864 enum rtx_code new_code = UNKNOWN;
865 switch (*code)
867 case EQ: new_code = EQ; break;
868 case NE: new_code = NE; break;
869 default: break;
872 if (new_code != UNKNOWN)
874 *op0 = XVECEXP (*op0, 0, 0);
875 *code = new_code;
879 /* Simplify cascaded EQ, NE with const0_rtx. */
880 if ((*code == NE || *code == EQ)
881 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
882 && GET_MODE (*op0) == SImode
883 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
884 && REG_P (XEXP (*op0, 0))
885 && XEXP (*op0, 1) == const0_rtx
886 && *op1 == const0_rtx)
888 if ((*code == EQ && GET_CODE (*op0) == NE)
889 || (*code == NE && GET_CODE (*op0) == EQ))
890 *code = EQ;
891 else
892 *code = NE;
893 *op0 = XEXP (*op0, 0);
896 /* Prefer register over memory as first operand. */
897 if (MEM_P (*op0) && REG_P (*op1))
899 rtx tem = *op0; *op0 = *op1; *op1 = tem;
900 *code = (int)swap_condition ((enum rtx_code)*code);
904 /* Emit a compare instruction suitable to implement the comparison
905 OP0 CODE OP1. Return the correct condition RTL to be placed in
906 the IF_THEN_ELSE of the conditional branch testing the result. */
909 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
911 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
912 rtx cc;
914 /* Do not output a redundant compare instruction if a compare_and_swap
915 pattern already computed the result and the machine modes are compatible. */
916 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
918 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
919 == GET_MODE (op0));
920 cc = op0;
922 else
924 cc = gen_rtx_REG (mode, CC_REGNUM);
925 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
928 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
 931 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if the
 932 current contents of MEM match CMP; the old contents are returned in OLD.
933 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
934 conditional branch testing the result. */
936 static rtx
937 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
938 rtx cmp, rtx new_rtx)
940 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
941 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
942 const0_rtx);
945 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
946 unconditional jump, else a conditional jump under condition COND. */
948 void
949 s390_emit_jump (rtx target, rtx cond)
951 rtx insn;
953 target = gen_rtx_LABEL_REF (VOIDmode, target);
954 if (cond)
955 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
957 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
958 emit_jump_insn (insn);
961 /* Return branch condition mask to implement a branch
962 specified by CODE. Return -1 for invalid comparisons. */
965 s390_branch_condition_mask (rtx code)
967 const int CC0 = 1 << 3;
968 const int CC1 = 1 << 2;
969 const int CC2 = 1 << 1;
970 const int CC3 = 1 << 0;
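/* These constants encode the 4-bit condition mask used by the branch
   mnemonics selected below: the most significant bit selects CC0 and
   the least significant bit selects CC3, so e.g. CC0 | CC2 yields
   mask 10 (branch on condition code 0 or 2).  */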
972 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
973 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
974 gcc_assert (XEXP (code, 1) == const0_rtx);
976 switch (GET_MODE (XEXP (code, 0)))
978 case CCZmode:
979 case CCZ1mode:
980 switch (GET_CODE (code))
982 case EQ: return CC0;
983 case NE: return CC1 | CC2 | CC3;
984 default: return -1;
986 break;
988 case CCT1mode:
989 switch (GET_CODE (code))
991 case EQ: return CC1;
992 case NE: return CC0 | CC2 | CC3;
993 default: return -1;
995 break;
997 case CCT2mode:
998 switch (GET_CODE (code))
1000 case EQ: return CC2;
1001 case NE: return CC0 | CC1 | CC3;
1002 default: return -1;
1004 break;
1006 case CCT3mode:
1007 switch (GET_CODE (code))
1009 case EQ: return CC3;
1010 case NE: return CC0 | CC1 | CC2;
1011 default: return -1;
1013 break;
1015 case CCLmode:
1016 switch (GET_CODE (code))
1018 case EQ: return CC0 | CC2;
1019 case NE: return CC1 | CC3;
1020 default: return -1;
1022 break;
1024 case CCL1mode:
1025 switch (GET_CODE (code))
1027 case LTU: return CC2 | CC3; /* carry */
1028 case GEU: return CC0 | CC1; /* no carry */
1029 default: return -1;
1031 break;
1033 case CCL2mode:
1034 switch (GET_CODE (code))
1036 case GTU: return CC0 | CC1; /* borrow */
1037 case LEU: return CC2 | CC3; /* no borrow */
1038 default: return -1;
1040 break;
1042 case CCL3mode:
1043 switch (GET_CODE (code))
1045 case EQ: return CC0 | CC2;
1046 case NE: return CC1 | CC3;
1047 case LTU: return CC1;
1048 case GTU: return CC3;
1049 case LEU: return CC1 | CC2;
1050 case GEU: return CC2 | CC3;
1051 default: return -1;
1054 case CCUmode:
1055 switch (GET_CODE (code))
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LTU: return CC1;
1060 case GTU: return CC2;
1061 case LEU: return CC0 | CC1;
1062 case GEU: return CC0 | CC2;
1063 default: return -1;
1065 break;
1067 case CCURmode:
1068 switch (GET_CODE (code))
1070 case EQ: return CC0;
1071 case NE: return CC2 | CC1 | CC3;
1072 case LTU: return CC2;
1073 case GTU: return CC1;
1074 case LEU: return CC0 | CC2;
1075 case GEU: return CC0 | CC1;
1076 default: return -1;
1078 break;
1080 case CCAPmode:
1081 switch (GET_CODE (code))
1083 case EQ: return CC0;
1084 case NE: return CC1 | CC2 | CC3;
1085 case LT: return CC1 | CC3;
1086 case GT: return CC2;
1087 case LE: return CC0 | CC1 | CC3;
1088 case GE: return CC0 | CC2;
1089 default: return -1;
1091 break;
1093 case CCANmode:
1094 switch (GET_CODE (code))
1096 case EQ: return CC0;
1097 case NE: return CC1 | CC2 | CC3;
1098 case LT: return CC1;
1099 case GT: return CC2 | CC3;
1100 case LE: return CC0 | CC1;
1101 case GE: return CC0 | CC2 | CC3;
1102 default: return -1;
1104 break;
1106 case CCSmode:
1107 switch (GET_CODE (code))
1109 case EQ: return CC0;
1110 case NE: return CC1 | CC2 | CC3;
1111 case LT: return CC1;
1112 case GT: return CC2;
1113 case LE: return CC0 | CC1;
1114 case GE: return CC0 | CC2;
1115 case UNORDERED: return CC3;
1116 case ORDERED: return CC0 | CC1 | CC2;
1117 case UNEQ: return CC0 | CC3;
1118 case UNLT: return CC1 | CC3;
1119 case UNGT: return CC2 | CC3;
1120 case UNLE: return CC0 | CC1 | CC3;
1121 case UNGE: return CC0 | CC2 | CC3;
1122 case LTGT: return CC1 | CC2;
1123 default: return -1;
1125 break;
1127 case CCSRmode:
1128 switch (GET_CODE (code))
1130 case EQ: return CC0;
1131 case NE: return CC2 | CC1 | CC3;
1132 case LT: return CC2;
1133 case GT: return CC1;
1134 case LE: return CC0 | CC2;
1135 case GE: return CC0 | CC1;
1136 case UNORDERED: return CC3;
1137 case ORDERED: return CC0 | CC2 | CC1;
1138 case UNEQ: return CC0 | CC3;
1139 case UNLT: return CC2 | CC3;
1140 case UNGT: return CC1 | CC3;
1141 case UNLE: return CC0 | CC2 | CC3;
1142 case UNGE: return CC0 | CC1 | CC3;
1143 case LTGT: return CC2 | CC1;
1144 default: return -1;
1146 break;
1148 default:
1149 return -1;
1154 /* Return branch condition mask to implement a compare and branch
1155 specified by CODE. Return -1 for invalid comparisons. */
1158 s390_compare_and_branch_condition_mask (rtx code)
1160 const int CC0 = 1 << 3;
1161 const int CC1 = 1 << 2;
1162 const int CC2 = 1 << 1;
1164 switch (GET_CODE (code))
1166 case EQ:
1167 return CC0;
1168 case NE:
1169 return CC1 | CC2;
1170 case LT:
1171 case LTU:
1172 return CC1;
1173 case GT:
1174 case GTU:
1175 return CC2;
1176 case LE:
1177 case LEU:
1178 return CC0 | CC1;
1179 case GE:
1180 case GEU:
1181 return CC0 | CC2;
1182 default:
1183 gcc_unreachable ();
1185 return -1;
1188 /* If INV is false, return assembler mnemonic string to implement
1189 a branch specified by CODE. If INV is true, return mnemonic
1190 for the corresponding inverted branch. */
1192 static const char *
1193 s390_branch_condition_mnemonic (rtx code, int inv)
1195 int mask;
1197 static const char *const mnemonic[16] =
1199 NULL, "o", "h", "nle",
1200 "l", "nhe", "lh", "ne",
1201 "e", "nlh", "he", "nl",
1202 "le", "nh", "no", NULL
1205 if (GET_CODE (XEXP (code, 0)) == REG
1206 && REGNO (XEXP (code, 0)) == CC_REGNUM
1207 && XEXP (code, 1) == const0_rtx)
1208 mask = s390_branch_condition_mask (code);
1209 else
1210 mask = s390_compare_and_branch_condition_mask (code);
1212 gcc_assert (mask >= 0);
1214 if (inv)
1215 mask ^= 15;
1217 gcc_assert (mask >= 1 && mask <= 14);
1219 return mnemonic[mask];
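/* For example, an EQ test in CCSmode has mask CC0 == 8, which maps to
   the "e" mnemonic above; with INV set the mask becomes 7 and the
   inverted branch uses "ne".  */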
 1222 /* Return the part of OP which has a value different from DEF.
 1223 The size of the part is determined by MODE.
 1224 Use this function only if you already know that OP really
 1225 contains such a part. */
1227 unsigned HOST_WIDE_INT
1228 s390_extract_part (rtx op, enum machine_mode mode, int def)
1230 unsigned HOST_WIDE_INT value = 0;
1231 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1232 int part_bits = GET_MODE_BITSIZE (mode);
1233 unsigned HOST_WIDE_INT part_mask
1234 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1235 int i;
1237 for (i = 0; i < max_parts; i++)
1239 if (i == 0)
1240 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1241 else
1242 value >>= part_bits;
1244 if ((value & part_mask) != (def & part_mask))
1245 return value & part_mask;
1248 gcc_unreachable ();
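/* Illustration: with a 64-bit HOST_WIDE_INT, applying this to the
   constant 0x12340000 with MODE == HImode and DEF == 0 scans the
   16-bit parts from the low end; the first part is 0, the second is
   0x1234, so 0x1234 is returned.  */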
1251 /* If OP is an integer constant of mode MODE with exactly one
1252 part of mode PART_MODE unequal to DEF, return the number of that
1253 part. Otherwise, return -1. */
1256 s390_single_part (rtx op,
1257 enum machine_mode mode,
1258 enum machine_mode part_mode,
1259 int def)
1261 unsigned HOST_WIDE_INT value = 0;
1262 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1263 unsigned HOST_WIDE_INT part_mask
1264 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1265 int i, part = -1;
1267 if (GET_CODE (op) != CONST_INT)
1268 return -1;
1270 for (i = 0; i < n_parts; i++)
1272 if (i == 0)
1273 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1274 else
1275 value >>= GET_MODE_BITSIZE (part_mode);
1277 if ((value & part_mask) != (def & part_mask))
1279 if (part != -1)
1280 return -1;
1281 else
1282 part = i;
1285 return part == -1 ? -1 : n_parts - 1 - part;
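/* Illustration: s390_single_part on the SImode constant 0x00ff0000 with
   PART_MODE == QImode and DEF == 0 finds exactly one nonzero byte (the
   third from the low end, i == 2) and returns n_parts - 1 - 2 == 1,
   i.e. the part number counted from the most significant end.  */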
1288 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1289 bits and no other bits are set in IN. POS and LENGTH can be used
1290 to obtain the start position and the length of the bitfield.
1292 POS gives the position of the first bit of the bitfield counting
1293 from the lowest order bit starting with zero. In order to use this
1294 value for S/390 instructions this has to be converted to "bits big
1295 endian" style. */
1297 bool
1298 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1299 int *pos, int *length)
1301 int tmp_pos = 0;
1302 int tmp_length = 0;
1303 int i;
1304 unsigned HOST_WIDE_INT mask = 1ULL;
1305 bool contiguous = false;
1307 for (i = 0; i < size; mask <<= 1, i++)
1309 if (contiguous)
1311 if (mask & in)
1312 tmp_length++;
1313 else
1314 break;
1316 else
1318 if (mask & in)
1320 contiguous = true;
1321 tmp_length++;
1323 else
1324 tmp_pos++;
1328 if (!tmp_length)
1329 return false;
1331 /* Calculate a mask for all bits beyond the contiguous bits. */
1332 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1334 if (mask & in)
1335 return false;
1337 if (tmp_length + tmp_pos - 1 > size)
1338 return false;
1340 if (length)
1341 *length = tmp_length;
1343 if (pos)
1344 *pos = tmp_pos;
1346 return true;
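/* Illustration: for IN == 0x3c0 and SIZE == 32 the loop skips six zero
   bits, then finds four contiguous one bits, so the function returns
   true with *POS == 6 and *LENGTH == 4.  */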
1349 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
1350 equivalent to a shift followed by the AND. In particular, CONTIG
1351 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
1352 for ROTL indicate a rotate to the right. */
1354 bool
1355 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
1357 int pos, len;
1358 bool ok;
1360 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
1361 gcc_assert (ok);
1363 return ((rotl >= 0 && rotl <= pos)
1364 || (rotl < 0 && -rotl <= bitsize - len - pos));
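/* Continuing the example above, CONTIG == 0x3c0 (POS 6, LEN 4) in a
   32-bit field allows left rotates up to 6 and right rotates (negative
   ROTL) down to -(32 - 4 - 6) == -22 without the mask crossing the
   bit 0/bit 63 gap.  */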
1367 /* Check whether we can (and want to) split a double-word
1368 move in mode MODE from SRC to DST into two single-word
1369 moves, moving the subword FIRST_SUBWORD first. */
1371 bool
1372 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1374 /* Floating point registers cannot be split. */
1375 if (FP_REG_P (src) || FP_REG_P (dst))
1376 return false;
1378 /* We don't need to split if operands are directly accessible. */
1379 if (s_operand (src, mode) || s_operand (dst, mode))
1380 return false;
1382 /* Non-offsettable memory references cannot be split. */
1383 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1384 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1385 return false;
1387 /* Moving the first subword must not clobber a register
1388 needed to move the second subword. */
1389 if (register_operand (dst, mode))
1391 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1392 if (reg_overlap_mentioned_p (subreg, src))
1393 return false;
1396 return true;
1399 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
 1400 and [MEM2, MEM2 + SIZE] overlap, and false
 1401 otherwise. */
1403 bool
1404 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1406 rtx addr1, addr2, addr_delta;
1407 HOST_WIDE_INT delta;
1409 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1410 return true;
1412 if (size == 0)
1413 return false;
1415 addr1 = XEXP (mem1, 0);
1416 addr2 = XEXP (mem2, 0);
1418 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1420 /* This overlapping check is used by peepholes merging memory block operations.
1421 Overlapping operations would otherwise be recognized by the S/390 hardware
1422 and would fall back to a slower implementation. Allowing overlapping
1423 operations would lead to slow code but not to wrong code. Therefore we are
1424 somewhat optimistic if we cannot prove that the memory blocks are
1425 overlapping.
1426 That's why we return false here although this may accept operations on
1427 overlapping memory areas. */
1428 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1429 return false;
1431 delta = INTVAL (addr_delta);
1433 if (delta == 0
1434 || (delta > 0 && delta < size)
1435 || (delta < 0 && -delta < size))
1436 return true;
1438 return false;
1441 /* Check whether the address of memory reference MEM2 equals exactly
1442 the address of memory reference MEM1 plus DELTA. Return true if
1443 we can prove this to be the case, false otherwise. */
1445 bool
1446 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1448 rtx addr1, addr2, addr_delta;
1450 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1451 return false;
1453 addr1 = XEXP (mem1, 0);
1454 addr2 = XEXP (mem2, 0);
1456 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1457 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1458 return false;
1460 return true;
1463 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1465 void
1466 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1467 rtx *operands)
1469 enum machine_mode wmode = mode;
1470 rtx dst = operands[0];
1471 rtx src1 = operands[1];
1472 rtx src2 = operands[2];
1473 rtx op, clob, tem;
1475 /* If we cannot handle the operation directly, use a temp register. */
1476 if (!s390_logical_operator_ok_p (operands))
1477 dst = gen_reg_rtx (mode);
1479 /* QImode and HImode patterns make sense only if we have a destination
1480 in memory. Otherwise perform the operation in SImode. */
1481 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1482 wmode = SImode;
1484 /* Widen operands if required. */
1485 if (mode != wmode)
1487 if (GET_CODE (dst) == SUBREG
1488 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1489 dst = tem;
1490 else if (REG_P (dst))
1491 dst = gen_rtx_SUBREG (wmode, dst, 0);
1492 else
1493 dst = gen_reg_rtx (wmode);
1495 if (GET_CODE (src1) == SUBREG
1496 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1497 src1 = tem;
1498 else if (GET_MODE (src1) != VOIDmode)
1499 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1501 if (GET_CODE (src2) == SUBREG
1502 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1503 src2 = tem;
1504 else if (GET_MODE (src2) != VOIDmode)
1505 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1508 /* Emit the instruction. */
1509 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1510 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1511 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1513 /* Fix up the destination if needed. */
1514 if (dst != operands[0])
1515 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1518 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1520 bool
1521 s390_logical_operator_ok_p (rtx *operands)
1523 /* If the destination operand is in memory, it needs to coincide
1524 with one of the source operands. After reload, it has to be
1525 the first source operand. */
1526 if (GET_CODE (operands[0]) == MEM)
1527 return rtx_equal_p (operands[0], operands[1])
1528 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1530 return true;
1533 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1534 operand IMMOP to switch from SS to SI type instructions. */
1536 void
1537 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1539 int def = code == AND ? -1 : 0;
1540 HOST_WIDE_INT mask;
1541 int part;
1543 gcc_assert (GET_CODE (*memop) == MEM);
1544 gcc_assert (!MEM_VOLATILE_P (*memop));
1546 mask = s390_extract_part (*immop, QImode, def);
1547 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1548 gcc_assert (part >= 0);
1550 *memop = adjust_address (*memop, QImode, part);
1551 *immop = gen_int_mode (mask, QImode);
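/* Illustration: for an SImode AND of a memory operand with the mask
   0xffffff00, only the lowest byte differs from all-ones, so the
   operation is narrowed to a single QImode access at byte offset 3
   with immediate 0x00 (an NI instead of an NC).  */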
1555 /* How to allocate a 'struct machine_function'. */
1557 static struct machine_function *
1558 s390_init_machine_status (void)
1560 return ggc_alloc_cleared_machine_function ();
1563 static void
1564 s390_option_override (void)
1566 /* Set up function hooks. */
1567 init_machine_status = s390_init_machine_status;
1569 /* Architecture mode defaults according to ABI. */
1570 if (!(target_flags_explicit & MASK_ZARCH))
1572 if (TARGET_64BIT)
1573 target_flags |= MASK_ZARCH;
1574 else
1575 target_flags &= ~MASK_ZARCH;
1578 /* Set the march default in case it hasn't been specified on
 1579 the command line. */
1580 if (s390_arch == PROCESSOR_max)
1582 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1583 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1584 s390_arch_flags = processor_flags_table[(int)s390_arch];
1587 /* Determine processor to tune for. */
1588 if (s390_tune == PROCESSOR_max)
1590 s390_tune = s390_arch;
1591 s390_tune_flags = s390_arch_flags;
1594 /* Sanity checks. */
1595 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1596 error ("z/Architecture mode not supported on %s", s390_arch_string);
1597 if (TARGET_64BIT && !TARGET_ZARCH)
1598 error ("64-bit ABI not supported in ESA/390 mode");
1600 /* Use hardware DFP if available and not explicitly disabled by
 1601 the user, e.g. with -m31 -march=z10 -mzarch. */
1602 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1603 target_flags |= MASK_HARD_DFP;
1605 if (TARGET_HARD_DFP && !TARGET_DFP)
1607 if (target_flags_explicit & MASK_HARD_DFP)
1609 if (!TARGET_CPU_DFP)
1610 error ("hardware decimal floating point instructions"
1611 " not available on %s", s390_arch_string);
1612 if (!TARGET_ZARCH)
1613 error ("hardware decimal floating point instructions"
1614 " not available in ESA/390 mode");
1616 else
1617 target_flags &= ~MASK_HARD_DFP;
1620 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1622 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1623 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1625 target_flags &= ~MASK_HARD_DFP;
1628 /* Set processor cost function. */
1629 switch (s390_tune)
1631 case PROCESSOR_2084_Z990:
1632 s390_cost = &z990_cost;
1633 break;
1634 case PROCESSOR_2094_Z9_109:
1635 s390_cost = &z9_109_cost;
1636 break;
1637 case PROCESSOR_2097_Z10:
1638 s390_cost = &z10_cost;
1639 break;
1640 case PROCESSOR_2817_Z196:
1641 s390_cost = &z196_cost;
1642 break;
1643 case PROCESSOR_2827_ZEC12:
1644 s390_cost = &zEC12_cost;
1645 break;
1646 default:
1647 s390_cost = &z900_cost;
1650 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1651 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1652 "in combination");
1654 if (s390_stack_size)
1656 if (s390_stack_guard >= s390_stack_size)
1657 error ("stack size must be greater than the stack guard value");
1658 else if (s390_stack_size > 1 << 16)
1659 error ("stack size must not be greater than 64k");
1661 else if (s390_stack_guard)
1662 error ("-mstack-guard implies use of -mstack-size");
1664 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1665 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1666 target_flags |= MASK_LONG_DOUBLE_128;
1667 #endif
1669 if (s390_tune == PROCESSOR_2097_Z10
1670 || s390_tune == PROCESSOR_2817_Z196
1671 || s390_tune == PROCESSOR_2827_ZEC12)
1673 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1674 global_options.x_param_values,
1675 global_options_set.x_param_values);
1676 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1677 global_options.x_param_values,
1678 global_options_set.x_param_values);
1679 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1680 global_options.x_param_values,
1681 global_options_set.x_param_values);
1682 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1683 global_options.x_param_values,
1684 global_options_set.x_param_values);
1687 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1688 global_options.x_param_values,
1689 global_options_set.x_param_values);
 1690 /* Values for loop prefetching. */
1691 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1692 global_options.x_param_values,
1693 global_options_set.x_param_values);
1694 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1695 global_options.x_param_values,
1696 global_options_set.x_param_values);
 1697 /* s390 has more than 2 cache levels and the caches are much larger.
 1698 Since we are always running virtualized, assume that we only get a
 1699 small part of the caches above L1. */
1700 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1701 global_options.x_param_values,
1702 global_options_set.x_param_values);
1703 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1704 global_options.x_param_values,
1705 global_options_set.x_param_values);
1706 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1707 global_options.x_param_values,
1708 global_options_set.x_param_values);
1710 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1711 requires the arch flags to be evaluated already. Since prefetching
1712 is beneficial on s390, we enable it if available. */
1713 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1714 flag_prefetch_loop_arrays = 1;
1716 /* Use the alternative scheduling-pressure algorithm by default. */
1717 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1718 global_options.x_param_values,
1719 global_options_set.x_param_values);
1721 if (TARGET_TPF)
1723 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1724 debuggers do not yet support DWARF 3/4. */
1725 if (!global_options_set.x_dwarf_strict)
1726 dwarf_strict = 1;
1727 if (!global_options_set.x_dwarf_version)
1728 dwarf_version = 2;
1732 /* Map for smallest class containing reg regno. */
1734 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1735 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1736 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1737 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1738 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1739 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1740 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1741 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1742 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1743 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1744 ACCESS_REGS, ACCESS_REGS
1747 /* Return attribute type of insn. */
1749 static enum attr_type
1750 s390_safe_attr_type (rtx insn)
1752 if (recog_memoized (insn) >= 0)
1753 return get_attr_type (insn);
1754 else
1755 return TYPE_NONE;
1758 /* Return true if DISP is a valid short displacement. */
1760 static bool
1761 s390_short_displacement (rtx disp)
1763 /* No displacement is OK. */
1764 if (!disp)
1765 return true;
1767 /* Without the long displacement facility we don't need to
 1768 distinguish between long and short displacements. */
1769 if (!TARGET_LONG_DISPLACEMENT)
1770 return true;
1772 /* Integer displacement in range. */
1773 if (GET_CODE (disp) == CONST_INT)
1774 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1776 /* GOT offset is not OK, the GOT can be large. */
1777 if (GET_CODE (disp) == CONST
1778 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1779 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1780 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1781 return false;
1783 /* All other symbolic constants are literal pool references,
1784 which are OK as the literal pool must be small. */
1785 if (GET_CODE (disp) == CONST)
1786 return true;
1788 return false;
 1791 /* Decompose an RTL expression ADDR for a memory address into
1792 its components, returned in OUT.
1794 Returns false if ADDR is not a valid memory address, true
1795 otherwise. If OUT is NULL, don't return the components,
1796 but check for validity only.
1798 Note: Only addresses in canonical form are recognized.
1799 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1800 canonical form so that they will be recognized. */
1802 static int
1803 s390_decompose_address (rtx addr, struct s390_address *out)
1805 HOST_WIDE_INT offset = 0;
1806 rtx base = NULL_RTX;
1807 rtx indx = NULL_RTX;
1808 rtx disp = NULL_RTX;
1809 rtx orig_disp;
1810 bool pointer = false;
1811 bool base_ptr = false;
1812 bool indx_ptr = false;
1813 bool literal_pool = false;
1815 /* We may need to substitute the literal pool base register into the address
1816 below. However, at this point we do not know which register is going to
1817 be used as base, so we substitute the arg pointer register. This is going
1818 to be treated as holding a pointer below -- it shouldn't be used for any
1819 other purpose. */
1820 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1822 /* Decompose address into base + index + displacement. */
1824 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1825 base = addr;
1827 else if (GET_CODE (addr) == PLUS)
1829 rtx op0 = XEXP (addr, 0);
1830 rtx op1 = XEXP (addr, 1);
1831 enum rtx_code code0 = GET_CODE (op0);
1832 enum rtx_code code1 = GET_CODE (op1);
1834 if (code0 == REG || code0 == UNSPEC)
1836 if (code1 == REG || code1 == UNSPEC)
1838 indx = op0; /* index + base */
1839 base = op1;
1842 else
1844 base = op0; /* base + displacement */
1845 disp = op1;
1849 else if (code0 == PLUS)
1851 indx = XEXP (op0, 0); /* index + base + disp */
1852 base = XEXP (op0, 1);
1853 disp = op1;
1856 else
1858 return false;
1862 else
1863 disp = addr; /* displacement */
1865 /* Extract integer part of displacement. */
1866 orig_disp = disp;
1867 if (disp)
1869 if (GET_CODE (disp) == CONST_INT)
1871 offset = INTVAL (disp);
1872 disp = NULL_RTX;
1874 else if (GET_CODE (disp) == CONST
1875 && GET_CODE (XEXP (disp, 0)) == PLUS
1876 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1878 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1879 disp = XEXP (XEXP (disp, 0), 0);
1883 /* Strip off CONST here to avoid special case tests later. */
1884 if (disp && GET_CODE (disp) == CONST)
1885 disp = XEXP (disp, 0);
1887 /* We can convert literal pool addresses to
1888 displacements by basing them off the base register. */
1889 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1891 /* Either base or index must be free to hold the base register. */
1892 if (!base)
1893 base = fake_pool_base, literal_pool = true;
1894 else if (!indx)
1895 indx = fake_pool_base, literal_pool = true;
1896 else
1897 return false;
1899 /* Mark up the displacement. */
1900 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1901 UNSPEC_LTREL_OFFSET);
1904 /* Validate base register. */
1905 if (base)
1907 if (GET_CODE (base) == UNSPEC)
1908 switch (XINT (base, 1))
1910 case UNSPEC_LTREF:
1911 if (!disp)
1912 disp = gen_rtx_UNSPEC (Pmode,
1913 gen_rtvec (1, XVECEXP (base, 0, 0)),
1914 UNSPEC_LTREL_OFFSET);
1915 else
1916 return false;
1918 base = XVECEXP (base, 0, 1);
1919 break;
1921 case UNSPEC_LTREL_BASE:
1922 if (XVECLEN (base, 0) == 1)
1923 base = fake_pool_base, literal_pool = true;
1924 else
1925 base = XVECEXP (base, 0, 1);
1926 break;
1928 default:
1929 return false;
1932 if (!REG_P (base)
1933 || (GET_MODE (base) != SImode
1934 && GET_MODE (base) != Pmode))
1935 return false;
1937 if (REGNO (base) == STACK_POINTER_REGNUM
1938 || REGNO (base) == FRAME_POINTER_REGNUM
1939 || ((reload_completed || reload_in_progress)
1940 && frame_pointer_needed
1941 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1942 || REGNO (base) == ARG_POINTER_REGNUM
1943 || (flag_pic
1944 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1945 pointer = base_ptr = true;
1947 if ((reload_completed || reload_in_progress)
1948 && base == cfun->machine->base_reg)
1949 pointer = base_ptr = literal_pool = true;
1952 /* Validate index register. */
1953 if (indx)
1955 if (GET_CODE (indx) == UNSPEC)
1956 switch (XINT (indx, 1))
1958 case UNSPEC_LTREF:
1959 if (!disp)
1960 disp = gen_rtx_UNSPEC (Pmode,
1961 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1962 UNSPEC_LTREL_OFFSET);
1963 else
1964 return false;
1966 indx = XVECEXP (indx, 0, 1);
1967 break;
1969 case UNSPEC_LTREL_BASE:
1970 if (XVECLEN (indx, 0) == 1)
1971 indx = fake_pool_base, literal_pool = true;
1972 else
1973 indx = XVECEXP (indx, 0, 1);
1974 break;
1976 default:
1977 return false;
1980 if (!REG_P (indx)
1981 || (GET_MODE (indx) != SImode
1982 && GET_MODE (indx) != Pmode))
1983 return false;
1985 if (REGNO (indx) == STACK_POINTER_REGNUM
1986 || REGNO (indx) == FRAME_POINTER_REGNUM
1987 || ((reload_completed || reload_in_progress)
1988 && frame_pointer_needed
1989 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1990 || REGNO (indx) == ARG_POINTER_REGNUM
1991 || (flag_pic
1992 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1993 pointer = indx_ptr = true;
1995 if ((reload_completed || reload_in_progress)
1996 && indx == cfun->machine->base_reg)
1997 pointer = indx_ptr = literal_pool = true;
2000 /* Prefer to use pointer as base, not index. */
2001 if (base && indx && !base_ptr
2002 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2004 rtx tmp = base;
2005 base = indx;
2006 indx = tmp;
2009 /* Validate displacement. */
2010 if (!disp)
2012 /* If virtual registers are involved, the displacement will change later
2013 anyway as the virtual registers get eliminated. This could make a
2014 valid displacement invalid, but it is more likely to make an invalid
2015 displacement valid, because we sometimes access the register save area
2016 via negative offsets to one of those registers.
2017 Thus we don't check the displacement for validity here. If after
2018 elimination the displacement turns out to be invalid after all,
2019 this is fixed up by reload in any case. */
 2020 /* LRA always keeps displacements up to date, and we need to
 2021 know that the displacement is correct throughout LRA, not only
 2022 at the final elimination. */
2023 if (lra_in_progress
2024 || (base != arg_pointer_rtx
2025 && indx != arg_pointer_rtx
2026 && base != return_address_pointer_rtx
2027 && indx != return_address_pointer_rtx
2028 && base != frame_pointer_rtx
2029 && indx != frame_pointer_rtx
2030 && base != virtual_stack_vars_rtx
2031 && indx != virtual_stack_vars_rtx))
2032 if (!DISP_IN_RANGE (offset))
2033 return false;
2035 else
2037 /* All the special cases are pointers. */
2038 pointer = true;
2040 /* In the small-PIC case, the linker converts @GOT
2041 and @GOTNTPOFF offsets to possible displacements. */
2042 if (GET_CODE (disp) == UNSPEC
2043 && (XINT (disp, 1) == UNSPEC_GOT
2044 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2045 && flag_pic == 1)
2050 /* Accept pool label offsets. */
2051 else if (GET_CODE (disp) == UNSPEC
2052 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2055 /* Accept literal pool references. */
2056 else if (GET_CODE (disp) == UNSPEC
2057 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2059 /* In case CSE pulled a non-literal-pool reference out of
2060 the pool, we have to reject the address. This is
2061 especially important when loading the GOT pointer on
2062 non-zarch CPUs. In this case the literal pool contains an
2063 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2064 will most likely exceed the displacement range. */
2065 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2066 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2067 return false;
2069 orig_disp = gen_rtx_CONST (Pmode, disp);
2070 if (offset)
2072 /* If we have an offset, make sure it does not
2073 exceed the size of the constant pool entry. */
2074 rtx sym = XVECEXP (disp, 0, 0);
2075 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2076 return false;
2078 orig_disp = plus_constant (Pmode, orig_disp, offset);
2082 else
2083 return false;
2086 if (!base && !indx)
2087 pointer = true;
2089 if (out)
2091 out->base = base;
2092 out->indx = indx;
2093 out->disp = orig_disp;
2094 out->pointer = pointer;
2095 out->literal_pool = literal_pool;
2098 return true;
2101 /* Decompose a RTL expression OP for a shift count into its components,
2102 and return the base register in BASE and the offset in OFFSET.
2104 Return true if OP is a valid shift count, false if not. */
2106 bool
2107 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2109 HOST_WIDE_INT off = 0;
2111 /* We can have an integer constant, an address register,
2112 or a sum of the two. */
2113 if (GET_CODE (op) == CONST_INT)
2115 off = INTVAL (op);
2116 op = NULL_RTX;
2118 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2120 off = INTVAL (XEXP (op, 1));
2121 op = XEXP (op, 0);
2123 while (op && GET_CODE (op) == SUBREG)
2124 op = SUBREG_REG (op);
2126 if (op && GET_CODE (op) != REG)
2127 return false;
2129 if (offset)
2130 *offset = off;
2131 if (base)
2132 *base = op;
2134 return true;
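/* Illustrative examples (hypothetical operands) of the shift-count forms
   accepted above and how they decompose:
     (const_int 3)                  -> *base = NULL,  *offset = 3
     (reg %r1)                      -> *base = %r1,   *offset = 0
     (plus (reg %r1) (const_int 7)) -> *base = %r1,   *offset = 7
   SUBREGs around the register are stripped; anything else is rejected.  */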
2138 /* Return true if CODE is a valid address without index. */
2140 bool
2141 s390_legitimate_address_without_index_p (rtx op)
2143 struct s390_address addr;
2145 if (!s390_decompose_address (XEXP (op, 0), &addr))
2146 return false;
2147 if (addr.indx)
2148 return false;
2150 return true;
2154 /* Return TRUE if ADDR is an operand valid for a load/store relative
2155 instruction. Be aware that the alignment of the operand needs to
2156 be checked separately.
2157 Valid addresses are single references or a sum of a reference and a
2158 constant integer. Return these parts in SYMREF and ADDEND. You can
2159 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2160 values. Literal pool references are *not* considered symbol
2161 references. */
2163 static bool
2164 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2166 HOST_WIDE_INT tmpaddend = 0;
2168 if (GET_CODE (addr) == CONST)
2169 addr = XEXP (addr, 0);
2171 if (GET_CODE (addr) == PLUS)
2173 if (!CONST_INT_P (XEXP (addr, 1)))
2174 return false;
2176 tmpaddend = INTVAL (XEXP (addr, 1));
2177 addr = XEXP (addr, 0);
2180 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2181 || (GET_CODE (addr) == UNSPEC
2182 && (XINT (addr, 1) == UNSPEC_GOTENT
2183 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2185 if (symref)
2186 *symref = addr;
2187 if (addend)
2188 *addend = tmpaddend;
2190 return true;
2192 return false;
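/* Illustrative example (hypothetical symbol): for
     ADDR = (const (plus (symbol_ref "foo") (const_int 8)))
   the function above returns true with *symref = (symbol_ref "foo") and
   *addend = 8, provided "foo" is not a literal pool reference
   (CONSTANT_POOL_ADDRESS_P).  */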
2195 /* Return true if the address in OP is valid for constraint letter C
2196 if wrapped in a MEM rtx. LIT_POOL_OK should be true if literal
2197 pool MEMs are to be accepted. Only the Q, R, S, T constraint
2198 letters are allowed for C. */
2200 static int
2201 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2203 struct s390_address addr;
2204 bool decomposed = false;
2206 /* This check makes sure that no symbolic addresses (except literal
2207 pool references) are accepted by the R or T constraints. */
2208 if (s390_loadrelative_operand_p (op, NULL, NULL))
2209 return 0;
2211 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2212 if (!lit_pool_ok)
2214 if (!s390_decompose_address (op, &addr))
2215 return 0;
2216 if (addr.literal_pool)
2217 return 0;
2218 decomposed = true;
2221 switch (c)
2223 case 'Q': /* no index short displacement */
2224 if (!decomposed && !s390_decompose_address (op, &addr))
2225 return 0;
2226 if (addr.indx)
2227 return 0;
2228 if (!s390_short_displacement (addr.disp))
2229 return 0;
2230 break;
2232 case 'R': /* with index short displacement */
2233 if (TARGET_LONG_DISPLACEMENT)
2235 if (!decomposed && !s390_decompose_address (op, &addr))
2236 return 0;
2237 if (!s390_short_displacement (addr.disp))
2238 return 0;
2240 /* Any invalid address here will be fixed up by reload,
2241 so accept it for the most generic constraint. */
2242 break;
2244 case 'S': /* no index long displacement */
2245 if (!TARGET_LONG_DISPLACEMENT)
2246 return 0;
2247 if (!decomposed && !s390_decompose_address (op, &addr))
2248 return 0;
2249 if (addr.indx)
2250 return 0;
2251 if (s390_short_displacement (addr.disp))
2252 return 0;
2253 break;
2255 case 'T': /* with index long displacement */
2256 if (!TARGET_LONG_DISPLACEMENT)
2257 return 0;
2258 /* Any invalid address here will be fixed up by reload,
2259 so accept it for the most generic constraint. */
2260 if ((decomposed || s390_decompose_address (op, &addr))
2261 && s390_short_displacement (addr.disp))
2262 return 0;
2263 break;
2264 default:
2265 return 0;
2267 return 1;
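/* Illustrative summary (assuming a target with long displacements) of the
   address forms the four letters accept, in assembler notation:
     Q   100(%r2)         no index, 12-bit unsigned displacement
     R   100(%r3,%r2)     index allowed, 12-bit unsigned displacement
     S   -8(%r2)          no index, displacement needs the 20-bit form
     T   -8(%r3,%r2)      index allowed, 20-bit displacement form
   Without long displacements, S and T are never matched, while R accepts
   any address and leaves fixing invalid ones to reload.  */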
2271 /* Evaluates constraint strings described by the regular expression
2272 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2273 the constraint given in STR, and 0 otherwise. */
2276 s390_mem_constraint (const char *str, rtx op)
2278 char c = str[0];
2280 switch (c)
2282 case 'A':
2283 /* Check for offsettable variants of memory constraints. */
2284 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2285 return 0;
2286 if ((reload_completed || reload_in_progress)
2287 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2288 return 0;
2289 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2290 case 'B':
2291 /* Check for non-literal-pool variants of memory constraints. */
2292 if (!MEM_P (op))
2293 return 0;
2294 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2295 case 'Q':
2296 case 'R':
2297 case 'S':
2298 case 'T':
2299 if (GET_CODE (op) != MEM)
2300 return 0;
2301 return s390_check_qrst_address (c, XEXP (op, 0), true);
2302 case 'U':
2303 return (s390_check_qrst_address ('Q', op, true)
2304 || s390_check_qrst_address ('R', op, true));
2305 case 'W':
2306 return (s390_check_qrst_address ('S', op, true)
2307 || s390_check_qrst_address ('T', op, true));
2308 case 'Y':
2309 /* Simply check for the basic form of a shift count. Reload will
2310 take care of making sure we have a proper base register. */
2311 if (!s390_decompose_shift_count (op, NULL, NULL))
2312 return 0;
2313 break;
2314 case 'Z':
2315 return s390_check_qrst_address (str[1], op, true);
2316 default:
2317 return 0;
2319 return 1;
2323 /* Evaluates constraint strings starting with letter O. Input
2324 parameter C is the second letter following the "O" in the constraint
2325 string. Returns 1 if VALUE meets the respective constraint and 0
2326 otherwise. */
2329 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2331 if (!TARGET_EXTIMM)
2332 return 0;
2334 switch (c)
2336 case 's':
2337 return trunc_int_for_mode (value, SImode) == value;
2339 case 'p':
2340 return value == 0
2341 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2343 case 'n':
2344 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2346 default:
2347 gcc_unreachable ();
2352 /* Evaluates constraint strings starting with letter N. Parameter STR
2353 contains the letters following letter "N" in the constraint string.
2354 Returns true if VALUE matches the constraint. */
2357 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2359 enum machine_mode mode, part_mode;
2360 int def;
2361 int part, part_goal;
2364 if (str[0] == 'x')
2365 part_goal = -1;
2366 else
2367 part_goal = str[0] - '0';
2369 switch (str[1])
2371 case 'Q':
2372 part_mode = QImode;
2373 break;
2374 case 'H':
2375 part_mode = HImode;
2376 break;
2377 case 'S':
2378 part_mode = SImode;
2379 break;
2380 default:
2381 return 0;
2384 switch (str[2])
2386 case 'H':
2387 mode = HImode;
2388 break;
2389 case 'S':
2390 mode = SImode;
2391 break;
2392 case 'D':
2393 mode = DImode;
2394 break;
2395 default:
2396 return 0;
2399 switch (str[3])
2401 case '0':
2402 def = 0;
2403 break;
2404 case 'F':
2405 def = -1;
2406 break;
2407 default:
2408 return 0;
2411 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2412 return 0;
2414 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2415 if (part < 0)
2416 return 0;
2417 if (part_goal != -1 && part_goal != part)
2418 return 0;
2420 return 1;
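/* Illustrative decoding (hypothetical constraint) of an "N" string: for
   STR = "xHD0" the letters give part_goal = -1 (any part), part_mode =
   HImode, mode = DImode and def = 0, i.e. VALUE must be a DImode constant
   whose halfwords are all zero except for exactly one arbitrary HImode
   part; 0x00000000ffff0000 matches, 0x0000ffff0000ffff does not.  */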
2424 /* Returns true if the input parameter VALUE is a float zero. */
2427 s390_float_const_zero_p (rtx value)
2429 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2430 && value == CONST0_RTX (GET_MODE (value)));
2433 /* Implement TARGET_REGISTER_MOVE_COST. */
2435 static int
2436 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2437 reg_class_t from, reg_class_t to)
2439 /* On s390, copy between fprs and gprs is expensive as long as no
2440 ldgr/lgdr can be used. */
2441 if ((!TARGET_Z10 || GET_MODE_SIZE (mode) != 8)
2442 && ((reg_classes_intersect_p (from, GENERAL_REGS)
2443 && reg_classes_intersect_p (to, FP_REGS))
2444 || (reg_classes_intersect_p (from, FP_REGS)
2445 && reg_classes_intersect_p (to, GENERAL_REGS))))
2446 return 10;
2448 return 1;
2451 /* Implement TARGET_MEMORY_MOVE_COST. */
2453 static int
2454 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2455 reg_class_t rclass ATTRIBUTE_UNUSED,
2456 bool in ATTRIBUTE_UNUSED)
2458 return 1;
2461 /* Compute a (partial) cost for rtx X. Return true if the complete
2462 cost has been computed, and false if subexpressions should be
2463 scanned. In either case, *TOTAL contains the cost result.
2464 CODE contains GET_CODE (x), OUTER_CODE contains the code
2465 of the superexpression of x. */
2467 static bool
2468 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2469 int *total, bool speed ATTRIBUTE_UNUSED)
2471 switch (code)
2473 case CONST:
2474 case CONST_INT:
2475 case LABEL_REF:
2476 case SYMBOL_REF:
2477 case CONST_DOUBLE:
2478 case MEM:
2479 *total = 0;
2480 return true;
2482 case ASHIFT:
2483 case ASHIFTRT:
2484 case LSHIFTRT:
2485 case ROTATE:
2486 case ROTATERT:
2487 case AND:
2488 case IOR:
2489 case XOR:
2490 case NEG:
2491 case NOT:
2492 *total = COSTS_N_INSNS (1);
2493 return false;
2495 case PLUS:
2496 case MINUS:
2497 *total = COSTS_N_INSNS (1);
2498 return false;
2500 case MULT:
2501 switch (GET_MODE (x))
2503 case SImode:
2505 rtx left = XEXP (x, 0);
2506 rtx right = XEXP (x, 1);
2507 if (GET_CODE (right) == CONST_INT
2508 && CONST_OK_FOR_K (INTVAL (right)))
2509 *total = s390_cost->mhi;
2510 else if (GET_CODE (left) == SIGN_EXTEND)
2511 *total = s390_cost->mh;
2512 else
2513 *total = s390_cost->ms; /* msr, ms, msy */
2514 break;
2516 case DImode:
2518 rtx left = XEXP (x, 0);
2519 rtx right = XEXP (x, 1);
2520 if (TARGET_ZARCH)
2522 if (GET_CODE (right) == CONST_INT
2523 && CONST_OK_FOR_K (INTVAL (right)))
2524 *total = s390_cost->mghi;
2525 else if (GET_CODE (left) == SIGN_EXTEND)
2526 *total = s390_cost->msgf;
2527 else
2528 *total = s390_cost->msg; /* msgr, msg */
2530 else /* TARGET_31BIT */
2532 if (GET_CODE (left) == SIGN_EXTEND
2533 && GET_CODE (right) == SIGN_EXTEND)
2534 /* mulsidi case: mr, m */
2535 *total = s390_cost->m;
2536 else if (GET_CODE (left) == ZERO_EXTEND
2537 && GET_CODE (right) == ZERO_EXTEND
2538 && TARGET_CPU_ZARCH)
2539 /* umulsidi case: ml, mlr */
2540 *total = s390_cost->ml;
2541 else
2542 /* Complex calculation is required. */
2543 *total = COSTS_N_INSNS (40);
2545 break;
2547 case SFmode:
2548 case DFmode:
2549 *total = s390_cost->mult_df;
2550 break;
2551 case TFmode:
2552 *total = s390_cost->mxbr;
2553 break;
2554 default:
2555 return false;
2557 return false;
2559 case FMA:
2560 switch (GET_MODE (x))
2562 case DFmode:
2563 *total = s390_cost->madbr;
2564 break;
2565 case SFmode:
2566 *total = s390_cost->maebr;
2567 break;
2568 default:
2569 return false;
2571 /* Negate in the third argument is free: FMSUB. */
2572 if (GET_CODE (XEXP (x, 2)) == NEG)
2574 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2575 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2576 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2577 return true;
2579 return false;
2581 case UDIV:
2582 case UMOD:
2583 if (GET_MODE (x) == TImode) /* 128 bit division */
2584 *total = s390_cost->dlgr;
2585 else if (GET_MODE (x) == DImode)
2587 rtx right = XEXP (x, 1);
2588 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2589 *total = s390_cost->dlr;
2590 else /* 64 by 64 bit division */
2591 *total = s390_cost->dlgr;
2593 else if (GET_MODE (x) == SImode) /* 32 bit division */
2594 *total = s390_cost->dlr;
2595 return false;
2597 case DIV:
2598 case MOD:
2599 if (GET_MODE (x) == DImode)
2601 rtx right = XEXP (x, 1);
2602 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2603 if (TARGET_ZARCH)
2604 *total = s390_cost->dsgfr;
2605 else
2606 *total = s390_cost->dr;
2607 else /* 64 by 64 bit division */
2608 *total = s390_cost->dsgr;
2610 else if (GET_MODE (x) == SImode) /* 32 bit division */
2611 *total = s390_cost->dlr;
2612 else if (GET_MODE (x) == SFmode)
2614 *total = s390_cost->debr;
2616 else if (GET_MODE (x) == DFmode)
2618 *total = s390_cost->ddbr;
2620 else if (GET_MODE (x) == TFmode)
2622 *total = s390_cost->dxbr;
2624 return false;
2626 case SQRT:
2627 if (GET_MODE (x) == SFmode)
2628 *total = s390_cost->sqebr;
2629 else if (GET_MODE (x) == DFmode)
2630 *total = s390_cost->sqdbr;
2631 else /* TFmode */
2632 *total = s390_cost->sqxbr;
2633 return false;
2635 case SIGN_EXTEND:
2636 case ZERO_EXTEND:
2637 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2638 || outer_code == PLUS || outer_code == MINUS
2639 || outer_code == COMPARE)
2640 *total = 0;
2641 return false;
2643 case COMPARE:
2644 *total = COSTS_N_INSNS (1);
2645 if (GET_CODE (XEXP (x, 0)) == AND
2646 && GET_CODE (XEXP (x, 1)) == CONST_INT
2647 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2649 rtx op0 = XEXP (XEXP (x, 0), 0);
2650 rtx op1 = XEXP (XEXP (x, 0), 1);
2651 rtx op2 = XEXP (x, 1);
2653 if (memory_operand (op0, GET_MODE (op0))
2654 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2655 return true;
2656 if (register_operand (op0, GET_MODE (op0))
2657 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2658 return true;
2660 return false;
2662 default:
2663 return false;
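/* Illustrative cost selection (hypothetical operands) for a DImode MULT on
   a zarch target: a 16-bit immediate operand is costed as mghi, a
   sign-extended operand as msgf, and anything else as msg, using the
   per-processor cost table s390_cost.  */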
2667 /* Return the cost of an address rtx ADDR. */
2669 static int
2670 s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2671 addr_space_t as ATTRIBUTE_UNUSED,
2672 bool speed ATTRIBUTE_UNUSED)
2674 struct s390_address ad;
2675 if (!s390_decompose_address (addr, &ad))
2676 return 1000;
2678 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2681 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2682 otherwise return 0. */
2685 tls_symbolic_operand (rtx op)
2687 if (GET_CODE (op) != SYMBOL_REF)
2688 return 0;
2689 return SYMBOL_REF_TLS_MODEL (op);
2692 /* Split DImode access register reference REG (on 64-bit) into its constituent
2693 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2694 gen_highpart cannot be used as they assume all registers are word-sized,
2695 while our access registers have only half that size. */
2697 void
2698 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2700 gcc_assert (TARGET_64BIT);
2701 gcc_assert (ACCESS_REG_P (reg));
2702 gcc_assert (GET_MODE (reg) == DImode);
2703 gcc_assert (!(REGNO (reg) & 1));
2705 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2706 *hi = gen_rtx_REG (SImode, REGNO (reg));
2709 /* Return true if OP contains a symbol reference. */
2711 bool
2712 symbolic_reference_mentioned_p (rtx op)
2714 const char *fmt;
2715 int i;
2717 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2718 return 1;
2720 fmt = GET_RTX_FORMAT (GET_CODE (op));
2721 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2723 if (fmt[i] == 'E')
2725 int j;
2727 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2728 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2729 return 1;
2732 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2733 return 1;
2736 return 0;
2739 /* Return true if OP contains a reference to a thread-local symbol. */
2741 bool
2742 tls_symbolic_reference_mentioned_p (rtx op)
2744 const char *fmt;
2745 int i;
2747 if (GET_CODE (op) == SYMBOL_REF)
2748 return tls_symbolic_operand (op);
2750 fmt = GET_RTX_FORMAT (GET_CODE (op));
2751 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2753 if (fmt[i] == 'E')
2755 int j;
2757 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2758 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2759 return true;
2762 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2763 return true;
2766 return false;
2770 /* Return true if OP is a legitimate general operand when
2771 generating PIC code. It is given that flag_pic is on
2772 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2775 legitimate_pic_operand_p (rtx op)
2777 /* Accept all non-symbolic constants. */
2778 if (!SYMBOLIC_CONST (op))
2779 return 1;
2781 /* Reject everything else; must be handled
2782 via emit_symbolic_move. */
2783 return 0;
2786 /* Returns true if the constant value OP is a legitimate general operand.
2787 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2789 static bool
2790 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2792 /* Accept all non-symbolic constants. */
2793 if (!SYMBOLIC_CONST (op))
2794 return 1;
2796 /* Accept immediate LARL operands. */
2797 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2798 return 1;
2800 /* Thread-local symbols are never legal constants. This is
2801 so that emit_call knows that computing such addresses
2802 might require a function call. */
2803 if (TLS_SYMBOLIC_CONST (op))
2804 return 0;
2806 /* In the PIC case, symbolic constants must *not* be
2807 forced into the literal pool. We accept them here,
2808 so that they will be handled by emit_symbolic_move. */
2809 if (flag_pic)
2810 return 1;
2812 /* All remaining non-PIC symbolic constants are
2813 forced into the literal pool. */
2814 return 0;
2817 /* Determine if it's legal to put X into the constant pool. This
2818 is not possible if X contains the address of a symbol that is
2819 not constant (TLS) or not known at final link time (PIC). */
2821 static bool
2822 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2824 switch (GET_CODE (x))
2826 case CONST_INT:
2827 case CONST_DOUBLE:
2828 /* Accept all non-symbolic constants. */
2829 return false;
2831 case LABEL_REF:
2832 /* Labels are OK iff we are non-PIC. */
2833 return flag_pic != 0;
2835 case SYMBOL_REF:
2836 /* 'Naked' TLS symbol references are never OK,
2837 non-TLS symbols are OK iff we are non-PIC. */
2838 if (tls_symbolic_operand (x))
2839 return true;
2840 else
2841 return flag_pic != 0;
2843 case CONST:
2844 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2845 case PLUS:
2846 case MINUS:
2847 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2848 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2850 case UNSPEC:
2851 switch (XINT (x, 1))
2853 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2854 case UNSPEC_LTREL_OFFSET:
2855 case UNSPEC_GOT:
2856 case UNSPEC_GOTOFF:
2857 case UNSPEC_PLTOFF:
2858 case UNSPEC_TLSGD:
2859 case UNSPEC_TLSLDM:
2860 case UNSPEC_NTPOFF:
2861 case UNSPEC_DTPOFF:
2862 case UNSPEC_GOTNTPOFF:
2863 case UNSPEC_INDNTPOFF:
2864 return false;
2866 /* If the literal pool shares the code section, execute-template
2867 placeholders may be put into the pool as well. */
2868 case UNSPEC_INSN:
2869 return TARGET_CPU_ZARCH;
2871 default:
2872 return true;
2874 break;
2876 default:
2877 gcc_unreachable ();
2881 /* Returns true if the constant value OP is a legitimate general
2882 operand during and after reload. The difference from
2883 legitimate_constant_p is that this function will not accept
2884 a constant that would need to be forced to the literal pool
2885 before it can be used as an operand.
2886 This function accepts all constants which can be loaded directly
2887 into a GPR. */
2889 bool
2890 legitimate_reload_constant_p (rtx op)
2892 /* Accept la(y) operands. */
2893 if (GET_CODE (op) == CONST_INT
2894 && DISP_IN_RANGE (INTVAL (op)))
2895 return true;
2897 /* Accept l(g)hi/l(g)fi operands. */
2898 if (GET_CODE (op) == CONST_INT
2899 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2900 return true;
2902 /* Accept lliXX operands. */
2903 if (TARGET_ZARCH
2904 && GET_CODE (op) == CONST_INT
2905 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2906 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2907 return true;
2909 if (TARGET_EXTIMM
2910 && GET_CODE (op) == CONST_INT
2911 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2912 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2913 return true;
2915 /* Accept larl operands. */
2916 if (TARGET_CPU_ZARCH
2917 && larl_operand (op, VOIDmode))
2918 return true;
2920 /* Accept floating-point zero operands that fit into a single GPR. */
2921 if (GET_CODE (op) == CONST_DOUBLE
2922 && s390_float_const_zero_p (op)
2923 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2924 return true;
2926 /* Accept double-word operands that can be split. */
2927 if (GET_CODE (op) == CONST_INT
2928 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2930 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2931 rtx hi = operand_subword (op, 0, 0, dword_mode);
2932 rtx lo = operand_subword (op, 1, 0, dword_mode);
2933 return legitimate_reload_constant_p (hi)
2934 && legitimate_reload_constant_p (lo);
2937 /* Everything else cannot be handled without reload. */
2938 return false;
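/* Illustrative example (hypothetical values, 31-bit target where word_mode
   is SImode): the double-word constant 0x0000123400005678 does not fit in
   a single word, so it is split into hi = 0x00001234 and lo = 0x00005678;
   both halves are l(g)hi-loadable, hence the constant is accepted.  */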
2941 /* Returns true if the constant value OP is a legitimate fp operand
2942 during and after reload.
2943 This function accepts all constants which can be loaded directly
2944 into an FPR. */
2946 static bool
2947 legitimate_reload_fp_constant_p (rtx op)
2949 /* Accept floating-point zero operands if the load zero instruction
2950 can be used. Prior to z196 the load fp zero instruction caused a
2951 performance penalty if the result is used as a BFP number. */
2952 if (TARGET_Z196
2953 && GET_CODE (op) == CONST_DOUBLE
2954 && s390_float_const_zero_p (op))
2955 return true;
2957 return false;
2960 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2961 return the class of reg to actually use. */
2963 static reg_class_t
2964 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2966 switch (GET_CODE (op))
2968 /* Constants we cannot reload into general registers
2969 must be forced into the literal pool. */
2970 case CONST_DOUBLE:
2971 case CONST_INT:
2972 if (reg_class_subset_p (GENERAL_REGS, rclass)
2973 && legitimate_reload_constant_p (op))
2974 return GENERAL_REGS;
2975 else if (reg_class_subset_p (ADDR_REGS, rclass)
2976 && legitimate_reload_constant_p (op))
2977 return ADDR_REGS;
2978 else if (reg_class_subset_p (FP_REGS, rclass)
2979 && legitimate_reload_fp_constant_p (op))
2980 return FP_REGS;
2981 return NO_REGS;
2983 /* If a symbolic constant or a PLUS is reloaded,
2984 it is most likely being used as an address, so
2985 prefer ADDR_REGS. If 'class' is not a superset
2986 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2987 case CONST:
2988 /* A larl operand with odd addend will get fixed via secondary
2989 reload. So don't request that it be pushed into the literal
2990 pool. */
2991 if (TARGET_CPU_ZARCH
2992 && GET_CODE (XEXP (op, 0)) == PLUS
2993 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2994 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2996 if (reg_class_subset_p (ADDR_REGS, rclass))
2997 return ADDR_REGS;
2998 else
2999 return NO_REGS;
3001 /* fallthrough */
3002 case LABEL_REF:
3003 case SYMBOL_REF:
3004 if (!legitimate_reload_constant_p (op))
3005 return NO_REGS;
3006 /* fallthrough */
3007 case PLUS:
3008 /* load address will be used. */
3009 if (reg_class_subset_p (ADDR_REGS, rclass))
3010 return ADDR_REGS;
3011 else
3012 return NO_REGS;
3014 default:
3015 break;
3018 return rclass;
3021 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3022 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3023 aligned. */
3025 bool
3026 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3028 HOST_WIDE_INT addend;
3029 rtx symref;
3031 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3032 return false;
3034 if (addend & (alignment - 1))
3035 return false;
3037 if (GET_CODE (symref) == SYMBOL_REF
3038 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3039 return true;
3041 if (GET_CODE (symref) == UNSPEC
3042 && alignment <= UNITS_PER_LONG)
3043 return true;
3045 return false;
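/* Illustrative example (hypothetical symbol): with ALIGNMENT = 8,
   sym + 12 is rejected because 12 & 7 != 0, while sym + 16 is accepted
   as long as sym is a SYMBOL_REF that is not marked as not naturally
   aligned (SYMBOL_REF_NOT_NATURALLY_ALIGNED_P).  */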
3048 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3049 operand, SCRATCH is used to load the even part of the address,
3050 and one is then added. */
3052 void
3053 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3055 HOST_WIDE_INT addend;
3056 rtx symref;
3058 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3059 gcc_unreachable ();
3061 if (!(addend & 1))
3062 /* Easy case. The addend is even so larl will do fine. */
3063 emit_move_insn (reg, addr);
3064 else
3066 /* We can leave the scratch register untouched if the target
3067 register is a valid base register. */
3068 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3069 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3070 scratch = reg;
3072 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3073 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3075 if (addend != 1)
3076 emit_move_insn (scratch,
3077 gen_rtx_CONST (Pmode,
3078 gen_rtx_PLUS (Pmode, symref,
3079 GEN_INT (addend - 1))));
3080 else
3081 emit_move_insn (scratch, symref);
3083 /* Increment the address using la in order to avoid clobbering cc. */
3084 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
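/* Illustrative expansion (schematic assembler, hypothetical operands): for
   ADDR = sym + 5 the code above keeps the larl addend even and adds the
   remaining 1 with la, roughly
       larl  %scratch, sym+4
       la    %reg, 1(%scratch)
   so the condition code is left untouched.  */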
3088 /* Generate what is necessary to move between REG and MEM using
3089 SCRATCH. The direction is given by TOMEM. */
3091 void
3092 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3094 /* Reload might have pulled a constant out of the literal pool.
3095 Force it back in. */
3096 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3097 || GET_CODE (mem) == CONST)
3098 mem = force_const_mem (GET_MODE (reg), mem);
3100 gcc_assert (MEM_P (mem));
3102 /* For a load from memory we can leave the scratch register
3103 untouched if the target register is a valid base register. */
3104 if (!tomem
3105 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3106 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3107 && GET_MODE (reg) == GET_MODE (scratch))
3108 scratch = reg;
3110 /* Load address into scratch register. Since we can't have a
3111 secondary reload for a secondary reload we have to cover the case
3112 where larl would need a secondary reload here as well. */
3113 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3115 /* Now we can use a standard load/store to do the move. */
3116 if (tomem)
3117 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3118 else
3119 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3122 /* Inform reload about cases where moving X with a mode MODE to a register in
3123 RCLASS requires an extra scratch or immediate register. Return the class
3124 needed for the immediate register. */
3126 static reg_class_t
3127 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3128 enum machine_mode mode, secondary_reload_info *sri)
3130 enum reg_class rclass = (enum reg_class) rclass_i;
3132 /* Intermediate register needed. */
3133 if (reg_classes_intersect_p (CC_REGS, rclass))
3134 return GENERAL_REGS;
3136 if (TARGET_Z10)
3138 HOST_WIDE_INT offset;
3139 rtx symref;
3141 /* On z10 several optimizer steps may generate larl operands with
3142 an odd addend. */
3143 if (in_p
3144 && s390_loadrelative_operand_p (x, &symref, &offset)
3145 && mode == Pmode
3146 && !SYMBOL_REF_ALIGN1_P (symref)
3147 && (offset & 1) == 1)
3148 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3149 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3151 /* On z10 we need a scratch register when moving QI, TI or floating
3152 point mode values from or to a memory location with a SYMBOL_REF
3153 or if the symref addend of an HI, SI or DI move is not aligned to the
3154 width of the access. */
3155 if (MEM_P (x)
3156 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3157 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3158 || (!TARGET_ZARCH && mode == DImode)
3159 || ((mode == HImode || mode == SImode || mode == DImode)
3160 && (!s390_check_symref_alignment (XEXP (x, 0),
3161 GET_MODE_SIZE (mode))))))
3163 #define __SECONDARY_RELOAD_CASE(M,m) \
3164 case M##mode: \
3165 if (TARGET_64BIT) \
3166 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3167 CODE_FOR_reload##m##di_tomem_z10; \
3168 else \
3169 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3170 CODE_FOR_reload##m##si_tomem_z10; \
3171 break;
3173 switch (GET_MODE (x))
3175 __SECONDARY_RELOAD_CASE (QI, qi);
3176 __SECONDARY_RELOAD_CASE (HI, hi);
3177 __SECONDARY_RELOAD_CASE (SI, si);
3178 __SECONDARY_RELOAD_CASE (DI, di);
3179 __SECONDARY_RELOAD_CASE (TI, ti);
3180 __SECONDARY_RELOAD_CASE (SF, sf);
3181 __SECONDARY_RELOAD_CASE (DF, df);
3182 __SECONDARY_RELOAD_CASE (TF, tf);
3183 __SECONDARY_RELOAD_CASE (SD, sd);
3184 __SECONDARY_RELOAD_CASE (DD, dd);
3185 __SECONDARY_RELOAD_CASE (TD, td);
3187 default:
3188 gcc_unreachable ();
3190 #undef __SECONDARY_RELOAD_CASE
3194 /* We need a scratch register when loading a PLUS expression which
3195 is not a legitimate operand of the LOAD ADDRESS instruction. */
3196 /* LRA can deal with the transformation of a plus operand very well,
3197 so we don't need to prompt LRA in this case. */
3198 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
3199 sri->icode = (TARGET_64BIT ?
3200 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3202 /* When performing a multiword move from or to memory, we have to make sure the
3203 second chunk in memory is addressable without causing a displacement
3204 overflow. If that would be the case we calculate the address in
3205 a scratch register. */
3206 if (MEM_P (x)
3207 && GET_CODE (XEXP (x, 0)) == PLUS
3208 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3209 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3210 + GET_MODE_SIZE (mode) - 1))
3212 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3213 in an s_operand address, since we may fall back to lm/stm. So we only
3214 have to care about overflows in the b+i+d case. */
3215 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3216 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3217 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3218 /* For FP_REGS no lm/stm is available so this check is triggered
3219 for displacement overflows in b+i+d and b+d like addresses. */
3220 || (reg_classes_intersect_p (FP_REGS, rclass)
3221 && s390_class_max_nregs (FP_REGS, mode) > 1))
3223 if (in_p)
3224 sri->icode = (TARGET_64BIT ?
3225 CODE_FOR_reloaddi_nonoffmem_in :
3226 CODE_FOR_reloadsi_nonoffmem_in);
3227 else
3228 sri->icode = (TARGET_64BIT ?
3229 CODE_FOR_reloaddi_nonoffmem_out :
3230 CODE_FOR_reloadsi_nonoffmem_out);
3234 /* A scratch address register is needed when a symbolic constant is
3235 copied to r0 when compiling with -fPIC. In other cases the target
3236 register might be used as a temporary (see legitimize_pic_address). */
3237 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3238 sri->icode = (TARGET_64BIT ?
3239 CODE_FOR_reloaddi_PIC_addr :
3240 CODE_FOR_reloadsi_PIC_addr);
3242 /* Either scratch or no register needed. */
3243 return NO_REGS;
3246 /* Generate code to load SRC, which is a PLUS that is not a
3247 legitimate operand for the LA instruction, into TARGET.
3248 SCRATCH may be used as a scratch register. */
3250 void
3251 s390_expand_plus_operand (rtx target, rtx src,
3252 rtx scratch)
3254 rtx sum1, sum2;
3255 struct s390_address ad;
3257 /* src must be a PLUS; get its two operands. */
3258 gcc_assert (GET_CODE (src) == PLUS);
3259 gcc_assert (GET_MODE (src) == Pmode);
3261 /* Check if any of the two operands is already scheduled
3262 for replacement by reload. This can happen e.g. when
3263 float registers occur in an address. */
3264 sum1 = find_replacement (&XEXP (src, 0));
3265 sum2 = find_replacement (&XEXP (src, 1));
3266 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3268 /* If the address is already strictly valid, there's nothing to do. */
3269 if (!s390_decompose_address (src, &ad)
3270 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3271 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3273 /* Otherwise, one of the operands cannot be an address register;
3274 we reload its value into the scratch register. */
3275 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3277 emit_move_insn (scratch, sum1);
3278 sum1 = scratch;
3280 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3282 emit_move_insn (scratch, sum2);
3283 sum2 = scratch;
3286 /* According to the way these invalid addresses are generated
3287 in reload.c, it should never happen (at least on s390) that
3288 *neither* of the PLUS components, after find_replacements
3289 was applied, is an address register. */
3290 if (sum1 == scratch && sum2 == scratch)
3292 debug_rtx (src);
3293 gcc_unreachable ();
3296 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3299 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3300 is only ever performed on addresses, so we can mark the
3301 sum as legitimate for LA in any case. */
3302 s390_load_address (target, src);
3306 /* Return true if ADDR is a valid memory address.
3307 STRICT specifies whether strict register checking applies. */
3309 static bool
3310 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3312 struct s390_address ad;
3314 if (TARGET_Z10
3315 && larl_operand (addr, VOIDmode)
3316 && (mode == VOIDmode
3317 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3318 return true;
3320 if (!s390_decompose_address (addr, &ad))
3321 return false;
3323 if (strict)
3325 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3326 return false;
3328 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3329 return false;
3331 else
3333 if (ad.base
3334 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3335 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3336 return false;
3338 if (ad.indx
3339 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3340 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3341 return false;
3343 return true;
3346 /* Return true if OP is a valid operand for the LA instruction.
3347 In 31-bit, we need to prove that the result is used as an
3348 address, as LA performs only a 31-bit addition. */
3350 bool
3351 legitimate_la_operand_p (rtx op)
3353 struct s390_address addr;
3354 if (!s390_decompose_address (op, &addr))
3355 return false;
3357 return (TARGET_64BIT || addr.pointer);
3360 /* Return true if it is valid *and* preferable to use LA to
3361 compute the sum of OP1 and OP2. */
3363 bool
3364 preferred_la_operand_p (rtx op1, rtx op2)
3366 struct s390_address addr;
3368 if (op2 != const0_rtx)
3369 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3371 if (!s390_decompose_address (op1, &addr))
3372 return false;
3373 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3374 return false;
3375 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3376 return false;
3378 /* Avoid LA instructions with index register on z196; it is
3379 preferable to use regular add instructions when possible.
3380 Starting with zEC12 the la with index register is "uncracked"
3381 again. */
3382 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3383 return false;
3385 if (!TARGET_64BIT && !addr.pointer)
3386 return false;
3388 if (addr.pointer)
3389 return true;
3391 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3392 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3393 return true;
3395 return false;
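/* Illustrative example (hypothetical operands): adding the constant 8 to a
   register known to hold a pointer decomposes into a base-plus-displacement
   address, so LA is preferred; a base-plus-index form is not preferred when
   tuning for z196, where the la with index register is cracked.  */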
3398 /* Emit a forced load-address operation to load SRC into DST.
3399 This will use the LOAD ADDRESS instruction even in situations
3400 where legitimate_la_operand_p (SRC) returns false. */
3402 void
3403 s390_load_address (rtx dst, rtx src)
3405 if (TARGET_64BIT)
3406 emit_move_insn (dst, src);
3407 else
3408 emit_insn (gen_force_la_31 (dst, src));
3411 /* Return a legitimate reference for ORIG (an address) using the
3412 register REG. If REG is 0, a new pseudo is generated.
3414 There are two types of references that must be handled:
3416 1. Global data references must load the address from the GOT, via
3417 the PIC reg. An insn is emitted to do this load, and the reg is
3418 returned.
3420 2. Static data references, constant pool addresses, and code labels
3421 compute the address as an offset from the GOT, whose base is in
3422 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3423 differentiate them from global data objects. The returned
3424 address is the PIC reg + an unspec constant.
3426 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3427 reg also appears in the address. */
3430 legitimize_pic_address (rtx orig, rtx reg)
3432 rtx addr = orig;
3433 rtx addend = const0_rtx;
3434 rtx new_rtx = orig;
3436 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3438 if (GET_CODE (addr) == CONST)
3439 addr = XEXP (addr, 0);
3441 if (GET_CODE (addr) == PLUS)
3443 addend = XEXP (addr, 1);
3444 addr = XEXP (addr, 0);
3447 if ((GET_CODE (addr) == LABEL_REF
3448 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3449 || (GET_CODE (addr) == UNSPEC &&
3450 (XINT (addr, 1) == UNSPEC_GOTENT
3451 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3452 && GET_CODE (addend) == CONST_INT)
3454 /* This can be locally addressed. */
3456 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3457 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3458 gen_rtx_CONST (Pmode, addr) : addr);
3460 if (TARGET_CPU_ZARCH
3461 && larl_operand (const_addr, VOIDmode)
3462 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3463 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3465 if (INTVAL (addend) & 1)
3467 /* LARL can't handle odd offsets, so emit a pair of LARL
3468 and LA. */
3469 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3471 if (!DISP_IN_RANGE (INTVAL (addend)))
3473 HOST_WIDE_INT even = INTVAL (addend) - 1;
3474 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3475 addr = gen_rtx_CONST (Pmode, addr);
3476 addend = const1_rtx;
3479 emit_move_insn (temp, addr);
3480 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3482 if (reg != 0)
3484 s390_load_address (reg, new_rtx);
3485 new_rtx = reg;
3488 else
3490 /* If the offset is even, we can just use LARL. This
3491 will happen automatically. */
3494 else
3496 /* No larl - Access local symbols relative to the GOT. */
3498 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3500 if (reload_in_progress || reload_completed)
3501 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3503 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3504 if (addend != const0_rtx)
3505 addr = gen_rtx_PLUS (Pmode, addr, addend);
3506 addr = gen_rtx_CONST (Pmode, addr);
3507 addr = force_const_mem (Pmode, addr);
3508 emit_move_insn (temp, addr);
3510 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3511 if (reg != 0)
3513 s390_load_address (reg, new_rtx);
3514 new_rtx = reg;
3518 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3520 /* A non-local symbol reference without addend.
3522 The symbol ref is wrapped into an UNSPEC to make sure the
3523 proper operand modifier (@GOT or @GOTENT) will be emitted.
3524 This will tell the linker to put the symbol into the GOT.
3526 Additionally the code dereferencing the GOT slot is emitted here.
3528 An addend to the symref needs to be added afterwards.
3529 legitimize_pic_address calls itself recursively to handle
3530 that case. So no need to do it here. */
3532 if (reg == 0)
3533 reg = gen_reg_rtx (Pmode);
3535 if (TARGET_Z10)
3537 /* Use load relative if possible.
3538 lgrl <target>, sym@GOTENT */
3539 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3540 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3541 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3543 emit_move_insn (reg, new_rtx);
3544 new_rtx = reg;
3546 else if (flag_pic == 1)
3548 /* Assume GOT offset is a valid displacement operand (< 4k
3549 or < 512k with z990). This is handled the same way in
3550 both 31- and 64-bit code (@GOT).
3551 lg <target>, sym@GOT(r12) */
3553 if (reload_in_progress || reload_completed)
3554 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3556 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3557 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3558 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3559 new_rtx = gen_const_mem (Pmode, new_rtx);
3560 emit_move_insn (reg, new_rtx);
3561 new_rtx = reg;
3563 else if (TARGET_CPU_ZARCH)
3565 /* If the GOT offset might be >= 4k, we determine the position
3566 of the GOT entry via a PC-relative LARL (@GOTENT).
3567 larl temp, sym@GOTENT
3568 lg <target>, 0(temp) */
3570 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3572 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3573 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3575 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3576 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3577 emit_move_insn (temp, new_rtx);
3579 new_rtx = gen_const_mem (Pmode, temp);
3580 emit_move_insn (reg, new_rtx);
3582 new_rtx = reg;
3584 else
3586 /* If the GOT offset might be >= 4k, we have to load it
3587 from the literal pool (@GOT).
3589 lg temp, lit-litbase(r13)
3590 lg <target>, 0(temp)
3591 lit: .long sym@GOT */
3593 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3595 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3596 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3598 if (reload_in_progress || reload_completed)
3599 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3601 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3602 addr = gen_rtx_CONST (Pmode, addr);
3603 addr = force_const_mem (Pmode, addr);
3604 emit_move_insn (temp, addr);
3606 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3607 new_rtx = gen_const_mem (Pmode, new_rtx);
3608 emit_move_insn (reg, new_rtx);
3609 new_rtx = reg;
3612 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3614 gcc_assert (XVECLEN (addr, 0) == 1);
3615 switch (XINT (addr, 1))
3617 /* These address symbols (or PLT slots) relative to the GOT
3618 (not GOT slots!). In general this will exceed the
3619 displacement range, so these values belong in the literal
3620 pool. */
3621 case UNSPEC_GOTOFF:
3622 case UNSPEC_PLTOFF:
3623 new_rtx = force_const_mem (Pmode, orig);
3624 break;
3626 /* For -fPIC the GOT size might exceed the displacement
3627 range so make sure the value is in the literal pool. */
3628 case UNSPEC_GOT:
3629 if (flag_pic == 2)
3630 new_rtx = force_const_mem (Pmode, orig);
3631 break;
3633 /* For @GOTENT larl is used. This is handled like local
3634 symbol refs. */
3635 case UNSPEC_GOTENT:
3636 gcc_unreachable ();
3637 break;
3639 /* @PLT is OK as is on 64-bit, must be converted to
3640 GOT-relative @PLTOFF on 31-bit. */
3641 case UNSPEC_PLT:
3642 if (!TARGET_CPU_ZARCH)
3644 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3646 if (reload_in_progress || reload_completed)
3647 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3649 addr = XVECEXP (addr, 0, 0);
3650 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3651 UNSPEC_PLTOFF);
3652 if (addend != const0_rtx)
3653 addr = gen_rtx_PLUS (Pmode, addr, addend);
3654 addr = gen_rtx_CONST (Pmode, addr);
3655 addr = force_const_mem (Pmode, addr);
3656 emit_move_insn (temp, addr);
3658 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3659 if (reg != 0)
3661 s390_load_address (reg, new_rtx);
3662 new_rtx = reg;
3665 else
3666 /* On 64 bit larl can be used. This case is handled like
3667 local symbol refs. */
3668 gcc_unreachable ();
3669 break;
3671 /* Everything else cannot happen. */
3672 default:
3673 gcc_unreachable ();
3676 else if (addend != const0_rtx)
3678 /* Otherwise, compute the sum. */
3680 rtx base = legitimize_pic_address (addr, reg);
3681 new_rtx = legitimize_pic_address (addend,
3682 base == reg ? NULL_RTX : reg);
3683 if (GET_CODE (new_rtx) == CONST_INT)
3684 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3685 else
3687 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3689 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3690 new_rtx = XEXP (new_rtx, 1);
3692 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3695 if (GET_CODE (new_rtx) == CONST)
3696 new_rtx = XEXP (new_rtx, 0);
3697 new_rtx = force_operand (new_rtx, 0);
3700 return new_rtx;
3703 /* Load the thread pointer into a register. */
3706 s390_get_thread_pointer (void)
3708 rtx tp = gen_reg_rtx (Pmode);
3710 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3711 mark_reg_pointer (tp, BITS_PER_WORD);
3713 return tp;
3716 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3717 in s390_tls_symbol which always refers to __tls_get_offset.
3718 The returned offset is written to RESULT_REG and a USE rtx is
3719 generated for TLS_CALL. */
3721 static GTY(()) rtx s390_tls_symbol;
3723 static void
3724 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3726 rtx insn;
3728 if (!flag_pic)
3729 emit_insn (s390_load_got ());
3731 if (!s390_tls_symbol)
3732 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3734 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3735 gen_rtx_REG (Pmode, RETURN_REGNUM));
3737 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3738 RTL_CONST_CALL_P (insn) = 1;
3741 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3742 this (thread-local) address. REG may be used as temporary. */
3744 static rtx
3745 legitimize_tls_address (rtx addr, rtx reg)
3747 rtx new_rtx, tls_call, temp, base, r2, insn;
3749 if (GET_CODE (addr) == SYMBOL_REF)
3750 switch (tls_symbolic_operand (addr))
3752 case TLS_MODEL_GLOBAL_DYNAMIC:
3753 start_sequence ();
3754 r2 = gen_rtx_REG (Pmode, 2);
3755 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3756 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3757 new_rtx = force_const_mem (Pmode, new_rtx);
3758 emit_move_insn (r2, new_rtx);
3759 s390_emit_tls_call_insn (r2, tls_call);
3760 insn = get_insns ();
3761 end_sequence ();
3763 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3764 temp = gen_reg_rtx (Pmode);
3765 emit_libcall_block (insn, temp, r2, new_rtx);
3767 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3768 if (reg != 0)
3770 s390_load_address (reg, new_rtx);
3771 new_rtx = reg;
3773 break;
3775 case TLS_MODEL_LOCAL_DYNAMIC:
3776 start_sequence ();
3777 r2 = gen_rtx_REG (Pmode, 2);
3778 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3779 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3780 new_rtx = force_const_mem (Pmode, new_rtx);
3781 emit_move_insn (r2, new_rtx);
3782 s390_emit_tls_call_insn (r2, tls_call);
3783 insn = get_insns ();
3784 end_sequence ();
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3787 temp = gen_reg_rtx (Pmode);
3788 emit_libcall_block (insn, temp, r2, new_rtx);
3790 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3791 base = gen_reg_rtx (Pmode);
3792 s390_load_address (base, new_rtx);
3794 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3795 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3796 new_rtx = force_const_mem (Pmode, new_rtx);
3797 temp = gen_reg_rtx (Pmode);
3798 emit_move_insn (temp, new_rtx);
3800 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3801 if (reg != 0)
3803 s390_load_address (reg, new_rtx);
3804 new_rtx = reg;
3806 break;
3808 case TLS_MODEL_INITIAL_EXEC:
3809 if (flag_pic == 1)
3811 /* Assume GOT offset < 4k. This is handled the same way
3812 in both 31- and 64-bit code. */
3814 if (reload_in_progress || reload_completed)
3815 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3817 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3818 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3819 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3820 new_rtx = gen_const_mem (Pmode, new_rtx);
3821 temp = gen_reg_rtx (Pmode);
3822 emit_move_insn (temp, new_rtx);
3824 else if (TARGET_CPU_ZARCH)
3826 /* If the GOT offset might be >= 4k, we determine the position
3827 of the GOT entry via a PC-relative LARL. */
3829 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3830 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3831 temp = gen_reg_rtx (Pmode);
3832 emit_move_insn (temp, new_rtx);
3834 new_rtx = gen_const_mem (Pmode, temp);
3835 temp = gen_reg_rtx (Pmode);
3836 emit_move_insn (temp, new_rtx);
3838 else if (flag_pic)
3840 /* If the GOT offset might be >= 4k, we have to load it
3841 from the literal pool. */
3843 if (reload_in_progress || reload_completed)
3844 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3846 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3847 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3848 new_rtx = force_const_mem (Pmode, new_rtx);
3849 temp = gen_reg_rtx (Pmode);
3850 emit_move_insn (temp, new_rtx);
3852 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3853 new_rtx = gen_const_mem (Pmode, new_rtx);
3855 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3856 temp = gen_reg_rtx (Pmode);
3857 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3859 else
3861 /* In position-dependent code, load the absolute address of
3862 the GOT entry from the literal pool. */
3864 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3865 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3866 new_rtx = force_const_mem (Pmode, new_rtx);
3867 temp = gen_reg_rtx (Pmode);
3868 emit_move_insn (temp, new_rtx);
3870 new_rtx = temp;
3871 new_rtx = gen_const_mem (Pmode, new_rtx);
3872 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3873 temp = gen_reg_rtx (Pmode);
3874 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3877 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3878 if (reg != 0)
3880 s390_load_address (reg, new_rtx);
3881 new_rtx = reg;
3883 break;
3885 case TLS_MODEL_LOCAL_EXEC:
3886 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3887 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3888 new_rtx = force_const_mem (Pmode, new_rtx);
3889 temp = gen_reg_rtx (Pmode);
3890 emit_move_insn (temp, new_rtx);
3892 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3893 if (reg != 0)
3895 s390_load_address (reg, new_rtx);
3896 new_rtx = reg;
3898 break;
3900 default:
3901 gcc_unreachable ();
3904 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3906 switch (XINT (XEXP (addr, 0), 1))
3908 case UNSPEC_INDNTPOFF:
3909 gcc_assert (TARGET_CPU_ZARCH);
3910 new_rtx = addr;
3911 break;
3913 default:
3914 gcc_unreachable ();
3918 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3919 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3921 new_rtx = XEXP (XEXP (addr, 0), 0);
3922 if (GET_CODE (new_rtx) != SYMBOL_REF)
3923 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3925 new_rtx = legitimize_tls_address (new_rtx, reg);
3926 new_rtx = plus_constant (Pmode, new_rtx,
3927 INTVAL (XEXP (XEXP (addr, 0), 1)));
3928 new_rtx = force_operand (new_rtx, 0);
3931 else
3932 gcc_unreachable (); /* for now ... */
3934 return new_rtx;
3937 /* Emit insns making the address in operands[1] valid for a standard
3938 move to operands[0]. operands[1] is replaced by an address which
3939 should be used instead of the former RTX to emit the move
3940 pattern. */
3942 void
3943 emit_symbolic_move (rtx *operands)
3945 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3947 if (GET_CODE (operands[0]) == MEM)
3948 operands[1] = force_reg (Pmode, operands[1]);
3949 else if (TLS_SYMBOLIC_CONST (operands[1]))
3950 operands[1] = legitimize_tls_address (operands[1], temp);
3951 else if (flag_pic)
3952 operands[1] = legitimize_pic_address (operands[1], temp);
3955 /* Try machine-dependent ways of modifying an illegitimate address X
3956 to be legitimate. If we find one, return the new, valid address.
3958 OLDX is the address as it was before break_out_memory_refs was called.
3959 In some cases it is useful to look at this to decide what needs to be done.
3961 MODE is the mode of the operand pointed to by X.
3963 When -fpic is used, special handling is needed for symbolic references.
3964 See comments by legitimize_pic_address for details. */
3966 static rtx
3967 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3968 enum machine_mode mode ATTRIBUTE_UNUSED)
3970 rtx constant_term = const0_rtx;
3972 if (TLS_SYMBOLIC_CONST (x))
3974 x = legitimize_tls_address (x, 0);
3976 if (s390_legitimate_address_p (mode, x, FALSE))
3977 return x;
3979 else if (GET_CODE (x) == PLUS
3980 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3981 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3983 return x;
3985 else if (flag_pic)
3987 if (SYMBOLIC_CONST (x)
3988 || (GET_CODE (x) == PLUS
3989 && (SYMBOLIC_CONST (XEXP (x, 0))
3990 || SYMBOLIC_CONST (XEXP (x, 1)))))
3991 x = legitimize_pic_address (x, 0);
3993 if (s390_legitimate_address_p (mode, x, FALSE))
3994 return x;
3997 x = eliminate_constant_term (x, &constant_term);
3999 /* Optimize loading of large displacements by splitting them
4000 into the multiple of 4K and the rest; this allows the
4001 former to be CSE'd if possible.
4003 Don't do this if the displacement is added to a register
4004 pointing into the stack frame, as the offsets will
4005 change later anyway. */
4007 if (GET_CODE (constant_term) == CONST_INT
4008 && !TARGET_LONG_DISPLACEMENT
4009 && !DISP_IN_RANGE (INTVAL (constant_term))
4010 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4012 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4013 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4015 rtx temp = gen_reg_rtx (Pmode);
4016 rtx val = force_operand (GEN_INT (upper), temp);
4017 if (val != temp)
4018 emit_move_insn (temp, val);
4020 x = gen_rtx_PLUS (Pmode, x, temp);
4021 constant_term = GEN_INT (lower);
4024 if (GET_CODE (x) == PLUS)
4026 if (GET_CODE (XEXP (x, 0)) == REG)
4028 rtx temp = gen_reg_rtx (Pmode);
4029 rtx val = force_operand (XEXP (x, 1), temp);
4030 if (val != temp)
4031 emit_move_insn (temp, val);
4033 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4036 else if (GET_CODE (XEXP (x, 1)) == REG)
4038 rtx temp = gen_reg_rtx (Pmode);
4039 rtx val = force_operand (XEXP (x, 0), temp);
4040 if (val != temp)
4041 emit_move_insn (temp, val);
4043 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4047 if (constant_term != const0_rtx)
4048 x = gen_rtx_PLUS (Pmode, x, constant_term);
4050 return x;
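/* Worked example (illustrative values) of the 4K split used above and in
   legitimize_reload_address below: for a displacement of 0x12345,
     lower = 0x12345 & 0xfff = 0x345
     upper = 0x12345 ^ 0x345 = 0x12000
   so the multiple of 4K ends up in a register (and can be CSE'd or
   reloaded), while 0x345 remains as an in-range displacement.  */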
4053 /* Try a machine-dependent way of reloading an illegitimate address AD
4054 operand. If we find one, push the reload and return the new address.
4056 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4057 and TYPE is the reload type of the current reload. */
4060 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4061 int opnum, int type)
4063 if (!optimize || TARGET_LONG_DISPLACEMENT)
4064 return NULL_RTX;
4066 if (GET_CODE (ad) == PLUS)
4068 rtx tem = simplify_binary_operation (PLUS, Pmode,
4069 XEXP (ad, 0), XEXP (ad, 1));
4070 if (tem)
4071 ad = tem;
4074 if (GET_CODE (ad) == PLUS
4075 && GET_CODE (XEXP (ad, 0)) == REG
4076 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4077 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4079 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4080 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4081 rtx cst, tem, new_rtx;
4083 cst = GEN_INT (upper);
4084 if (!legitimate_reload_constant_p (cst))
4085 cst = force_const_mem (Pmode, cst);
4087 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4088 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4090 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4091 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4092 opnum, (enum reload_type) type);
4093 return new_rtx;
4096 return NULL_RTX;
4099 /* Emit code to move LEN bytes from SRC to DST. */
4101 bool
4102 s390_expand_movmem (rtx dst, rtx src, rtx len)
4104 /* When tuning for z10 or higher we rely on the Glibc functions to
4105 do the right thing. Only for constant lengths of 64k or less do we
4106 generate inline code. */
4107 if (s390_tune >= PROCESSOR_2097_Z10
4108 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4109 return false;
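/* Sketch of the strategies used below (assuming the movmem patterns
   expand to MVC/MVCLE as usual):
     - constant LEN in [1, 256]: a single MVC of LEN bytes,
     - TARGET_MVCLE: one MVCLE handling an arbitrary length,
     - otherwise: a loop of 256-byte MVC blocks, with prefetching on
       z10 and later, followed by a final MVC for the remainder.  */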
4111 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4113 if (INTVAL (len) > 0)
4114 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4117 else if (TARGET_MVCLE)
4119 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4122 else
4124 rtx dst_addr, src_addr, count, blocks, temp;
4125 rtx loop_start_label = gen_label_rtx ();
4126 rtx loop_end_label = gen_label_rtx ();
4127 rtx end_label = gen_label_rtx ();
4128 enum machine_mode mode;
4130 mode = GET_MODE (len);
4131 if (mode == VOIDmode)
4132 mode = Pmode;
4134 dst_addr = gen_reg_rtx (Pmode);
4135 src_addr = gen_reg_rtx (Pmode);
4136 count = gen_reg_rtx (mode);
4137 blocks = gen_reg_rtx (mode);
4139 convert_move (count, len, 1);
4140 emit_cmp_and_jump_insns (count, const0_rtx,
4141 EQ, NULL_RTX, mode, 1, end_label);
4143 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4144 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4145 dst = change_address (dst, VOIDmode, dst_addr);
4146 src = change_address (src, VOIDmode, src_addr);
4148 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4149 OPTAB_DIRECT);
4150 if (temp != count)
4151 emit_move_insn (count, temp);
4153 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4154 OPTAB_DIRECT);
4155 if (temp != blocks)
4156 emit_move_insn (blocks, temp);
4158 emit_cmp_and_jump_insns (blocks, const0_rtx,
4159 EQ, NULL_RTX, mode, 1, loop_end_label);
4161 emit_label (loop_start_label);
4163 if (TARGET_Z10
4164 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4166 rtx prefetch;
4168 /* Issue a read prefetch for the +3 cache line. */
4169 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4170 const0_rtx, const0_rtx);
4171 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4172 emit_insn (prefetch);
4174 /* Issue a write prefetch for the +3 cache line. */
4175 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4176 const1_rtx, const0_rtx);
4177 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4178 emit_insn (prefetch);
4181 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4182 s390_load_address (dst_addr,
4183 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4184 s390_load_address (src_addr,
4185 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4187 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4188 OPTAB_DIRECT);
4189 if (temp != blocks)
4190 emit_move_insn (blocks, temp);
4192 emit_cmp_and_jump_insns (blocks, const0_rtx,
4193 EQ, NULL_RTX, mode, 1, loop_end_label);
4195 emit_jump (loop_start_label);
4196 emit_label (loop_end_label);
4198 emit_insn (gen_movmem_short (dst, src,
4199 convert_to_mode (Pmode, count, 1)));
4200 emit_label (end_label);
4202 return true;
4205 /* Emit code to set LEN bytes at DST to VAL.
4206 Make use of clrmem if VAL is zero. */
4208 void
4209 s390_expand_setmem (rtx dst, rtx len, rtx val)
4211 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4212 return;
4214 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4216 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4218 if (val == const0_rtx && INTVAL (len) <= 256)
4219 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4220 else
4222 /* Initialize memory by storing the first byte. */
4223 emit_move_insn (adjust_address (dst, QImode, 0), val);
4225 if (INTVAL (len) > 1)
4227 /* Initiate a 1-byte overlapping move.
4228 The first byte of DST is propagated through DSTP1.
4229 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4230 DST is set to size 1 so the rest of the memory location
4231 does not count as source operand. */
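/* Illustrative example: for LEN == 5 and VAL == 0x42, the byte 0x42 is
   first stored at DST; the movmem below (an MVC of LEN - 1 == 4 bytes
   from DST to DST + 1) then copies byte by byte, so the freshly
   written 0x42 is propagated through all five bytes.  */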
4232 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4233 set_mem_size (dst, 1);
4235 emit_insn (gen_movmem_short (dstp1, dst,
4236 GEN_INT (INTVAL (len) - 2)));
4241 else if (TARGET_MVCLE)
4243 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4244 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4247 else
4249 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4250 rtx loop_start_label = gen_label_rtx ();
4251 rtx loop_end_label = gen_label_rtx ();
4252 rtx end_label = gen_label_rtx ();
4253 enum machine_mode mode;
4255 mode = GET_MODE (len);
4256 if (mode == VOIDmode)
4257 mode = Pmode;
4259 dst_addr = gen_reg_rtx (Pmode);
4260 count = gen_reg_rtx (mode);
4261 blocks = gen_reg_rtx (mode);
4263 convert_move (count, len, 1);
4264 emit_cmp_and_jump_insns (count, const0_rtx,
4265 EQ, NULL_RTX, mode, 1, end_label);
4267 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4268 dst = change_address (dst, VOIDmode, dst_addr);
4270 if (val == const0_rtx)
4271 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4272 OPTAB_DIRECT);
4273 else
4275 dstp1 = adjust_address (dst, VOIDmode, 1);
4276 set_mem_size (dst, 1);
4278 /* Initialize memory by storing the first byte. */
4279 emit_move_insn (adjust_address (dst, QImode, 0), val);
4281 /* If count is 1 we are done. */
4282 emit_cmp_and_jump_insns (count, const1_rtx,
4283 EQ, NULL_RTX, mode, 1, end_label);
4285 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4286 OPTAB_DIRECT);
4288 if (temp != count)
4289 emit_move_insn (count, temp);
4291 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4292 OPTAB_DIRECT);
4293 if (temp != blocks)
4294 emit_move_insn (blocks, temp);
4296 emit_cmp_and_jump_insns (blocks, const0_rtx,
4297 EQ, NULL_RTX, mode, 1, loop_end_label);
4299 emit_label (loop_start_label);
4301 if (TARGET_Z10
4302 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4304 /* Issue a write prefetch for the +4 cache line. */
4305 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4306 GEN_INT (1024)),
4307 const1_rtx, const0_rtx);
4308 emit_insn (prefetch);
4309 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4312 if (val == const0_rtx)
4313 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4314 else
4315 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4316 s390_load_address (dst_addr,
4317 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4319 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4320 OPTAB_DIRECT);
4321 if (temp != blocks)
4322 emit_move_insn (blocks, temp);
4324 emit_cmp_and_jump_insns (blocks, const0_rtx,
4325 EQ, NULL_RTX, mode, 1, loop_end_label);
4327 emit_jump (loop_start_label);
4328 emit_label (loop_end_label);
4330 if (val == const0_rtx)
4331 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4332 else
4333 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4334 emit_label (end_label);
4338 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4339 and return the result in TARGET. */
4341 bool
4342 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4344 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4345 rtx tmp;
4347 /* When tuning for z10 or higher we rely on the Glibc functions to
4348 do the right thing. Only for constant lengths of 64k or less do we
4349 generate inline code. */
4350 if (s390_tune >= PROCESSOR_2097_Z10
4351 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4352 return false;
4354 /* As the result of CMPINT is inverted compared to what we need,
4355 we have to swap the operands. */
4356 tmp = op0; op0 = op1; op1 = tmp;
4358 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4360 if (INTVAL (len) > 0)
4362 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4363 emit_insn (gen_cmpint (target, ccreg));
4365 else
4366 emit_move_insn (target, const0_rtx);
4368 else if (TARGET_MVCLE)
4370 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4371 emit_insn (gen_cmpint (target, ccreg));
4373 else
4375 rtx addr0, addr1, count, blocks, temp;
4376 rtx loop_start_label = gen_label_rtx ();
4377 rtx loop_end_label = gen_label_rtx ();
4378 rtx end_label = gen_label_rtx ();
4379 enum machine_mode mode;
4381 mode = GET_MODE (len);
4382 if (mode == VOIDmode)
4383 mode = Pmode;
4385 addr0 = gen_reg_rtx (Pmode);
4386 addr1 = gen_reg_rtx (Pmode);
4387 count = gen_reg_rtx (mode);
4388 blocks = gen_reg_rtx (mode);
4390 convert_move (count, len, 1);
4391 emit_cmp_and_jump_insns (count, const0_rtx,
4392 EQ, NULL_RTX, mode, 1, end_label);
4394 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4395 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4396 op0 = change_address (op0, VOIDmode, addr0);
4397 op1 = change_address (op1, VOIDmode, addr1);
4399 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4400 OPTAB_DIRECT);
4401 if (temp != count)
4402 emit_move_insn (count, temp);
4404 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4405 OPTAB_DIRECT);
4406 if (temp != blocks)
4407 emit_move_insn (blocks, temp);
4409 emit_cmp_and_jump_insns (blocks, const0_rtx,
4410 EQ, NULL_RTX, mode, 1, loop_end_label);
4412 emit_label (loop_start_label);
4414 if (TARGET_Z10
4415 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4417 rtx prefetch;
4419 /* Issue a read prefetch for the +2 cache line of operand 1. */
4420 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4421 const0_rtx, const0_rtx);
4422 emit_insn (prefetch);
4423 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4425 /* Issue a read prefetch for the +2 cache line of operand 2. */
4426 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4427 const0_rtx, const0_rtx);
4428 emit_insn (prefetch);
4429 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4432 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4433 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4434 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4435 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4436 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4437 emit_jump_insn (temp);
4439 s390_load_address (addr0,
4440 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4441 s390_load_address (addr1,
4442 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4444 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4445 OPTAB_DIRECT);
4446 if (temp != blocks)
4447 emit_move_insn (blocks, temp);
4449 emit_cmp_and_jump_insns (blocks, const0_rtx,
4450 EQ, NULL_RTX, mode, 1, loop_end_label);
4452 emit_jump (loop_start_label);
4453 emit_label (loop_end_label);
4455 emit_insn (gen_cmpmem_short (op0, op1,
4456 convert_to_mode (Pmode, count, 1)));
4457 emit_label (end_label);
4459 emit_insn (gen_cmpint (target, ccreg));
4461 return true;
4465 /* Expand conditional increment or decrement using alc/slb instructions.
4466 Should generate code setting DST to either SRC or SRC + INCREMENT,
4467 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4468 Returns true if successful, false otherwise.
4470 That makes it possible to implement some if-constructs without jumps e.g.:
4471 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4472 unsigned int a, b, c;
4473 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4474 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4475 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4476 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4478 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4479 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4480 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4481 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4482 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
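/* Illustrative example: with this expander a statement such as
     c += (a < b);   with unsigned a, b, c
   can be emitted as a logical compare followed by an add-with-carry
   (e.g. CLR + ALCR against a zero operand), so the carry produced by
   the comparison is added to c without any branch.  */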
4484 bool
4485 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4486 rtx dst, rtx src, rtx increment)
4488 enum machine_mode cmp_mode;
4489 enum machine_mode cc_mode;
4490 rtx op_res;
4491 rtx insn;
4492 rtvec p;
4493 int ret;
4495 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4496 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4497 cmp_mode = SImode;
4498 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4499 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4500 cmp_mode = DImode;
4501 else
4502 return false;
4504 /* Try ADD LOGICAL WITH CARRY. */
4505 if (increment == const1_rtx)
4507 /* Determine CC mode to use. */
4508 if (cmp_code == EQ || cmp_code == NE)
4510 if (cmp_op1 != const0_rtx)
4512 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4513 NULL_RTX, 0, OPTAB_WIDEN);
4514 cmp_op1 = const0_rtx;
4517 cmp_code = cmp_code == EQ ? LEU : GTU;
4520 if (cmp_code == LTU || cmp_code == LEU)
4522 rtx tem = cmp_op0;
4523 cmp_op0 = cmp_op1;
4524 cmp_op1 = tem;
4525 cmp_code = swap_condition (cmp_code);
4528 switch (cmp_code)
4530 case GTU:
4531 cc_mode = CCUmode;
4532 break;
4534 case GEU:
4535 cc_mode = CCL3mode;
4536 break;
4538 default:
4539 return false;
4542 /* Emit comparison instruction pattern. */
4543 if (!register_operand (cmp_op0, cmp_mode))
4544 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4546 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4547 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4548 /* We use insn_invalid_p here to add clobbers if required. */
4549 ret = insn_invalid_p (emit_insn (insn), false);
4550 gcc_assert (!ret);
4552 /* Emit ALC instruction pattern. */
4553 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4554 gen_rtx_REG (cc_mode, CC_REGNUM),
4555 const0_rtx);
4557 if (src != const0_rtx)
4559 if (!register_operand (src, GET_MODE (dst)))
4560 src = force_reg (GET_MODE (dst), src);
4562 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4563 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4566 p = rtvec_alloc (2);
4567 RTVEC_ELT (p, 0) =
4568 gen_rtx_SET (VOIDmode, dst, op_res);
4569 RTVEC_ELT (p, 1) =
4570 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4571 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4573 return true;
4576 /* Try SUBTRACT LOGICAL WITH BORROW. */
4577 if (increment == constm1_rtx)
4579 /* Determine CC mode to use. */
4580 if (cmp_code == EQ || cmp_code == NE)
4582 if (cmp_op1 != const0_rtx)
4584 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4585 NULL_RTX, 0, OPTAB_WIDEN);
4586 cmp_op1 = const0_rtx;
4589 cmp_code = cmp_code == EQ ? LEU : GTU;
4592 if (cmp_code == GTU || cmp_code == GEU)
4594 rtx tem = cmp_op0;
4595 cmp_op0 = cmp_op1;
4596 cmp_op1 = tem;
4597 cmp_code = swap_condition (cmp_code);
4600 switch (cmp_code)
4602 case LEU:
4603 cc_mode = CCUmode;
4604 break;
4606 case LTU:
4607 cc_mode = CCL3mode;
4608 break;
4610 default:
4611 return false;
4614 /* Emit comparison instruction pattern. */
4615 if (!register_operand (cmp_op0, cmp_mode))
4616 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4618 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4619 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4620 /* We use insn_invalid_p here to add clobbers if required. */
4621 ret = insn_invalid_p (emit_insn (insn), false);
4622 gcc_assert (!ret);
4624 /* Emit SLB instruction pattern. */
4625 if (!register_operand (src, GET_MODE (dst)))
4626 src = force_reg (GET_MODE (dst), src);
4628 op_res = gen_rtx_MINUS (GET_MODE (dst),
4629 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4630 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4631 gen_rtx_REG (cc_mode, CC_REGNUM),
4632 const0_rtx));
4633 p = rtvec_alloc (2);
4634 RTVEC_ELT (p, 0) =
4635 gen_rtx_SET (VOIDmode, dst, op_res);
4636 RTVEC_ELT (p, 1) =
4637 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4638 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4640 return true;
4643 return false;
4646 /* Expand code for the insv template. Return true if successful. */
4648 bool
4649 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4651 int bitsize = INTVAL (op1);
4652 int bitpos = INTVAL (op2);
4653 enum machine_mode mode = GET_MODE (dest);
4654 enum machine_mode smode;
4655 int smode_bsize, mode_bsize;
4656 rtx op, clobber;
4658 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
4659 return false;
4661 /* Generate INSERT IMMEDIATE (IILL et al). */
4662 /* (set (ze (reg)) (const_int)). */
4663 if (TARGET_ZARCH
4664 && register_operand (dest, word_mode)
4665 && (bitpos % 16) == 0
4666 && (bitsize % 16) == 0
4667 && const_int_operand (src, VOIDmode))
4669 HOST_WIDE_INT val = INTVAL (src);
4670 int regpos = bitpos + bitsize;
4672 while (regpos > bitpos)
4674 enum machine_mode putmode;
4675 int putsize;
4677 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4678 putmode = SImode;
4679 else
4680 putmode = HImode;
4682 putsize = GET_MODE_BITSIZE (putmode);
4683 regpos -= putsize;
4684 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4685 GEN_INT (putsize),
4686 GEN_INT (regpos)),
4687 gen_int_mode (val, putmode));
4688 val >>= putsize;
4690 gcc_assert (regpos == bitpos);
4691 return true;
4694 smode = smallest_mode_for_size (bitsize, MODE_INT);
4695 smode_bsize = GET_MODE_BITSIZE (smode);
4696 mode_bsize = GET_MODE_BITSIZE (mode);
4698 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4699 if (bitpos == 0
4700 && (bitsize % BITS_PER_UNIT) == 0
4701 && MEM_P (dest)
4702 && (register_operand (src, word_mode)
4703 || const_int_operand (src, VOIDmode)))
4705 /* Emit standard pattern if possible. */
4706 if (smode_bsize == bitsize)
4708 emit_move_insn (adjust_address (dest, smode, 0),
4709 gen_lowpart (smode, src));
4710 return true;
4713 /* (set (ze (mem)) (const_int)). */
4714 else if (const_int_operand (src, VOIDmode))
4716 int size = bitsize / BITS_PER_UNIT;
4717 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4718 BLKmode,
4719 UNITS_PER_WORD - size);
4721 dest = adjust_address (dest, BLKmode, 0);
4722 set_mem_size (dest, size);
4723 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4724 return true;
4727 /* (set (ze (mem)) (reg)). */
4728 else if (register_operand (src, word_mode))
4730 if (bitsize <= 32)
4731 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4732 const0_rtx), src);
4733 else
4735 /* Emit st,stcmh sequence. */
4736 int stcmh_width = bitsize - 32;
4737 int size = stcmh_width / BITS_PER_UNIT;
4739 emit_move_insn (adjust_address (dest, SImode, size),
4740 gen_lowpart (SImode, src));
4741 set_mem_size (dest, size);
4742 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4743 GEN_INT (stcmh_width),
4744 const0_rtx),
4745 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4747 return true;
4751 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4752 if ((bitpos % BITS_PER_UNIT) == 0
4753 && (bitsize % BITS_PER_UNIT) == 0
4754 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4755 && MEM_P (src)
4756 && (mode == DImode || mode == SImode)
4757 && register_operand (dest, mode))
4759 /* Emit a strict_low_part pattern if possible. */
4760 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4762 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4763 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4764 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4765 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4766 return true;
4769 /* ??? There are more powerful versions of ICM that are not
4770 completely represented in the md file. */
4773 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4774 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4776 enum machine_mode mode_s = GET_MODE (src);
4778 if (mode_s == VOIDmode)
4780 /* Assume const_int etc already in the proper mode. */
4781 src = force_reg (mode, src);
4783 else if (mode_s != mode)
4785 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4786 src = force_reg (mode_s, src);
4787 src = gen_lowpart (mode, src);
4790 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4791 op = gen_rtx_SET (VOIDmode, op, src);
4793 if (!TARGET_ZEC12)
4795 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4796 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
4798 emit_insn (op);
4800 return true;
4803 return false;
4806 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4807 register that holds VAL of mode MODE shifted by COUNT bits. */
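/* Illustrative example: for MODE == QImode, VAL == 0x1ab and COUNT == 24
   the returned register holds (0x1ab & 0xff) << 24 == 0xab000000, i.e.
   the byte placed into the leftmost byte of an SImode word.  */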
4809 static inline rtx
4810 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4812 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4813 NULL_RTX, 1, OPTAB_DIRECT);
4814 return expand_simple_binop (SImode, ASHIFT, val, count,
4815 NULL_RTX, 1, OPTAB_DIRECT);
4818 /* Structure to hold the initial parameters for a compare_and_swap operation
4819 in HImode and QImode. */
4821 struct alignment_context
4823 rtx memsi; /* SI aligned memory location. */
4824 rtx shift; /* Bit offset with regard to lsb. */
4825 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4826 rtx modemaski; /* ~modemask */
4827 bool aligned; /* True if memory is aligned, false otherwise. */
4830 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4831 structure AC for transparent simplification if the memory alignment is
4832 known to be at least 32 bits. MEM is the memory location for the actual
4833 operation and MODE its mode. */
4835 static void
4836 init_alignment_context (struct alignment_context *ac, rtx mem,
4837 enum machine_mode mode)
4839 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4840 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4842 if (ac->aligned)
4843 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4844 else
4846 /* Alignment is unknown. */
4847 rtx byteoffset, addr, align;
4849 /* Force the address into a register. */
4850 addr = force_reg (Pmode, XEXP (mem, 0));
4852 /* Align it to SImode. */
4853 align = expand_simple_binop (Pmode, AND, addr,
4854 GEN_INT (-GET_MODE_SIZE (SImode)),
4855 NULL_RTX, 1, OPTAB_DIRECT);
4856 /* Generate MEM. */
4857 ac->memsi = gen_rtx_MEM (SImode, align);
4858 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4859 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4860 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4862 /* Calculate shiftcount. */
4863 byteoffset = expand_simple_binop (Pmode, AND, addr,
4864 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4865 NULL_RTX, 1, OPTAB_DIRECT);
4866 /* As we already have some offset, evaluate the remaining distance. */
4867 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4868 NULL_RTX, 1, OPTAB_DIRECT);
4871 /* Shift is the byte count, but we need the bitcount. */
4872 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4873 NULL_RTX, 1, OPTAB_DIRECT);
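/* Illustrative example for the unaligned case: an HImode access at byte
   offset 2 within its SImode word yields shift = (4 - 2) - 2 = 0 bytes,
   i.e. 0 bits, since on this big-endian target that halfword occupies
   the least significant bits of the word; at byte offset 0 the shift is
   2 bytes == 16 bits.  */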
4875 /* Calculate masks. */
4876 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4877 GEN_INT (GET_MODE_MASK (mode)),
4878 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4879 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4880 NULL_RTX, 1);
4883 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4884 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4885 perform the merge in SEQ2. */
4887 static rtx
4888 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4889 enum machine_mode mode, rtx val, rtx ins)
4891 rtx tmp;
4893 if (ac->aligned)
4895 start_sequence ();
4896 tmp = copy_to_mode_reg (SImode, val);
4897 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4898 const0_rtx, ins))
4900 *seq1 = NULL;
4901 *seq2 = get_insns ();
4902 end_sequence ();
4903 return tmp;
4905 end_sequence ();
4908 /* Failed to use insv. Generate a two part shift and mask. */
4909 start_sequence ();
4910 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4911 *seq1 = get_insns ();
4912 end_sequence ();
4914 start_sequence ();
4915 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4916 *seq2 = get_insns ();
4917 end_sequence ();
4919 return tmp;
4922 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4923 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4924 value to set if CMP == MEM. */
4926 void
4927 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4928 rtx cmp, rtx new_rtx, bool is_weak)
4930 struct alignment_context ac;
4931 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4932 rtx res = gen_reg_rtx (SImode);
4933 rtx csloop = NULL, csend = NULL;
4935 gcc_assert (MEM_P (mem));
4937 init_alignment_context (&ac, mem, mode);
4939 /* Load full word. Subsequent loads are performed by CS. */
4940 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4941 NULL_RTX, 1, OPTAB_DIRECT);
4943 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4944 possible, we try to use insv to make this happen efficiently. If
4945 that fails we'll generate code both inside and outside the loop. */
4946 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4947 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4949 if (seq0)
4950 emit_insn (seq0);
4951 if (seq1)
4952 emit_insn (seq1);
4954 /* Start CS loop. */
4955 if (!is_weak)
4957 /* Begin assuming success. */
4958 emit_move_insn (btarget, const1_rtx);
4960 csloop = gen_label_rtx ();
4961 csend = gen_label_rtx ();
4962 emit_label (csloop);
4965 /* val = "<mem>00..0<mem>"
4966 * cmp = "00..0<cmp>00..0"
4967 * new = "00..0<new>00..0"
4970 emit_insn (seq2);
4971 emit_insn (seq3);
4973 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4974 if (is_weak)
4975 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4976 else
4978 rtx tmp;
4980 /* Jump to end if we're done (likely?). */
4981 s390_emit_jump (csend, cc);
4983 /* Check for changes outside MODE, and loop internally if so.
4984 Arrange the moves so that the compare is adjacent to the
4985 branch so that we can generate CRJ. */
4986 tmp = copy_to_reg (val);
4987 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4988 1, OPTAB_DIRECT);
4989 cc = s390_emit_compare (NE, val, tmp);
4990 s390_emit_jump (csloop, cc);
4992 /* Failed. */
4993 emit_move_insn (btarget, const0_rtx);
4994 emit_label (csend);
4997 /* Return the correct part of the bitfield. */
4998 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4999 NULL_RTX, 1, OPTAB_DIRECT), 1);
5002 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
5003 and VAL the value to play with. If AFTER is true then store the value
5004 MEM holds after the operation, if AFTER is false then store the value MEM
5005 holds before the operation. If TARGET is zero then discard that value, else
5006 store it to TARGET. */
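/* Illustrative example: an atomic OR of 0x5a into a QImode location in
   the last byte of its SImode word first shifts VAL into place (giving
   0x0000005a), loads the containing word, ORs the shifted value into a
   copy inside the CS loop, and retries the compare-and-swap until no
   competing update intervened.  */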
5008 void
5009 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
5010 rtx target, rtx mem, rtx val, bool after)
5012 struct alignment_context ac;
5013 rtx cmp;
5014 rtx new_rtx = gen_reg_rtx (SImode);
5015 rtx orig = gen_reg_rtx (SImode);
5016 rtx csloop = gen_label_rtx ();
5018 gcc_assert (!target || register_operand (target, VOIDmode));
5019 gcc_assert (MEM_P (mem));
5021 init_alignment_context (&ac, mem, mode);
5023 /* Shift val to the correct bit positions.
5024 Preserve "icm", but prevent "ex icm". */
5025 if (!(ac.aligned && code == SET && MEM_P (val)))
5026 val = s390_expand_mask_and_shift (val, mode, ac.shift);
5028 /* Further preparation insns. */
5029 if (code == PLUS || code == MINUS)
5030 emit_move_insn (orig, val);
5031 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
5032 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
5033 NULL_RTX, 1, OPTAB_DIRECT);
5035 /* Load full word. Subsequent loads are performed by CS. */
5036 cmp = force_reg (SImode, ac.memsi);
5038 /* Start CS loop. */
5039 emit_label (csloop);
5040 emit_move_insn (new_rtx, cmp);
5042 /* Patch new with val at correct position. */
5043 switch (code)
5045 case PLUS:
5046 case MINUS:
5047 val = expand_simple_binop (SImode, code, new_rtx, orig,
5048 NULL_RTX, 1, OPTAB_DIRECT);
5049 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5050 NULL_RTX, 1, OPTAB_DIRECT);
5051 /* FALLTHRU */
5052 case SET:
5053 if (ac.aligned && MEM_P (val))
5054 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5055 0, 0, SImode, val);
5056 else
5058 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5059 NULL_RTX, 1, OPTAB_DIRECT);
5060 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5061 NULL_RTX, 1, OPTAB_DIRECT);
5063 break;
5064 case AND:
5065 case IOR:
5066 case XOR:
5067 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5068 NULL_RTX, 1, OPTAB_DIRECT);
5069 break;
5070 case MULT: /* NAND */
5071 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5072 NULL_RTX, 1, OPTAB_DIRECT);
5073 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5074 NULL_RTX, 1, OPTAB_DIRECT);
5075 break;
5076 default:
5077 gcc_unreachable ();
5080 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5081 ac.memsi, cmp, new_rtx));
5083 /* Return the correct part of the bitfield. */
5084 if (target)
5085 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5086 after ? new_rtx : cmp, ac.shift,
5087 NULL_RTX, 1, OPTAB_DIRECT), 1);
5090 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5091 We need to emit DTP-relative relocations. */
5093 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5095 static void
5096 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5098 switch (size)
5100 case 4:
5101 fputs ("\t.long\t", file);
5102 break;
5103 case 8:
5104 fputs ("\t.quad\t", file);
5105 break;
5106 default:
5107 gcc_unreachable ();
5109 output_addr_const (file, x);
5110 fputs ("@DTPOFF", file);
5113 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5114 /* Implement TARGET_MANGLE_TYPE. */
5116 static const char *
5117 s390_mangle_type (const_tree type)
5119 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5120 && TARGET_LONG_DOUBLE_128)
5121 return "g";
5123 /* For all other types, use normal C++ mangling. */
5124 return NULL;
5126 #endif
5128 /* In the name of slightly smaller debug output, and to cater to
5129 general assembler lossage, recognize various UNSPEC sequences
5130 and turn them back into a direct symbol reference. */
5132 static rtx
5133 s390_delegitimize_address (rtx orig_x)
5135 rtx x, y;
5137 orig_x = delegitimize_mem_from_attrs (orig_x);
5138 x = orig_x;
5140 /* Extract the symbol ref from:
5141 (plus:SI (reg:SI 12 %r12)
5142 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5143 UNSPEC_GOTOFF/PLTOFF)))
5145 (plus:SI (reg:SI 12 %r12)
5146 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5147 UNSPEC_GOTOFF/PLTOFF)
5148 (const_int 4 [0x4])))) */
5149 if (GET_CODE (x) == PLUS
5150 && REG_P (XEXP (x, 0))
5151 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5152 && GET_CODE (XEXP (x, 1)) == CONST)
5154 HOST_WIDE_INT offset = 0;
5156 /* The const operand. */
5157 y = XEXP (XEXP (x, 1), 0);
5159 if (GET_CODE (y) == PLUS
5160 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5162 offset = INTVAL (XEXP (y, 1));
5163 y = XEXP (y, 0);
5166 if (GET_CODE (y) == UNSPEC
5167 && (XINT (y, 1) == UNSPEC_GOTOFF
5168 || XINT (y, 1) == UNSPEC_PLTOFF))
5169 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5172 if (GET_CODE (x) != MEM)
5173 return orig_x;
5175 x = XEXP (x, 0);
5176 if (GET_CODE (x) == PLUS
5177 && GET_CODE (XEXP (x, 1)) == CONST
5178 && GET_CODE (XEXP (x, 0)) == REG
5179 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5181 y = XEXP (XEXP (x, 1), 0);
5182 if (GET_CODE (y) == UNSPEC
5183 && XINT (y, 1) == UNSPEC_GOT)
5184 y = XVECEXP (y, 0, 0);
5185 else
5186 return orig_x;
5188 else if (GET_CODE (x) == CONST)
5190 /* Extract the symbol ref from:
5191 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5192 UNSPEC_PLT/GOTENT))) */
5194 y = XEXP (x, 0);
5195 if (GET_CODE (y) == UNSPEC
5196 && (XINT (y, 1) == UNSPEC_GOTENT
5197 || XINT (y, 1) == UNSPEC_PLT))
5198 y = XVECEXP (y, 0, 0);
5199 else
5200 return orig_x;
5202 else
5203 return orig_x;
5205 if (GET_MODE (orig_x) != Pmode)
5207 if (GET_MODE (orig_x) == BLKmode)
5208 return orig_x;
5209 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5210 if (y == NULL_RTX)
5211 return orig_x;
5213 return y;
5216 /* Output operand OP to stdio stream FILE.
5217 OP is an address (register + offset) which is not used to address data;
5218 instead the rightmost bits are interpreted as the value. */
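/* Illustrative example: a shift count operand of the form (%r3 + 4100)
   is printed as "4(%r3)", since only the low twelve bits of the offset
   are significant; without a base register only the masked offset is
   printed.  */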
5220 static void
5221 print_shift_count_operand (FILE *file, rtx op)
5223 HOST_WIDE_INT offset;
5224 rtx base;
5226 /* Extract base register and offset. */
5227 if (!s390_decompose_shift_count (op, &base, &offset))
5228 gcc_unreachable ();
5230 /* Sanity check. */
5231 if (base)
5233 gcc_assert (GET_CODE (base) == REG);
5234 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5235 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5238 /* Offsets are restricted to twelve bits. */
5239 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5240 if (base)
5241 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5244 /* See 'get_some_local_dynamic_name'. */
5246 static int
5247 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5249 rtx x = *px;
5251 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5253 x = get_pool_constant (x);
5254 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5257 if (GET_CODE (x) == SYMBOL_REF
5258 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5260 cfun->machine->some_ld_name = XSTR (x, 0);
5261 return 1;
5264 return 0;
5267 /* Locate some local-dynamic symbol still in use by this function
5268 so that we can print its name in local-dynamic base patterns. */
5270 static const char *
5271 get_some_local_dynamic_name (void)
5273 rtx insn;
5275 if (cfun->machine->some_ld_name)
5276 return cfun->machine->some_ld_name;
5278 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5279 if (INSN_P (insn)
5280 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5281 return cfun->machine->some_ld_name;
5283 gcc_unreachable ();
5286 /* Output machine-dependent UNSPECs occurring in address constant X
5287 in assembler syntax to stdio stream FILE. Returns true if the
5288 constant X could be recognized, false otherwise. */
5290 static bool
5291 s390_output_addr_const_extra (FILE *file, rtx x)
5293 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5294 switch (XINT (x, 1))
5296 case UNSPEC_GOTENT:
5297 output_addr_const (file, XVECEXP (x, 0, 0));
5298 fprintf (file, "@GOTENT");
5299 return true;
5300 case UNSPEC_GOT:
5301 output_addr_const (file, XVECEXP (x, 0, 0));
5302 fprintf (file, "@GOT");
5303 return true;
5304 case UNSPEC_GOTOFF:
5305 output_addr_const (file, XVECEXP (x, 0, 0));
5306 fprintf (file, "@GOTOFF");
5307 return true;
5308 case UNSPEC_PLT:
5309 output_addr_const (file, XVECEXP (x, 0, 0));
5310 fprintf (file, "@PLT");
5311 return true;
5312 case UNSPEC_PLTOFF:
5313 output_addr_const (file, XVECEXP (x, 0, 0));
5314 fprintf (file, "@PLTOFF");
5315 return true;
5316 case UNSPEC_TLSGD:
5317 output_addr_const (file, XVECEXP (x, 0, 0));
5318 fprintf (file, "@TLSGD");
5319 return true;
5320 case UNSPEC_TLSLDM:
5321 assemble_name (file, get_some_local_dynamic_name ());
5322 fprintf (file, "@TLSLDM");
5323 return true;
5324 case UNSPEC_DTPOFF:
5325 output_addr_const (file, XVECEXP (x, 0, 0));
5326 fprintf (file, "@DTPOFF");
5327 return true;
5328 case UNSPEC_NTPOFF:
5329 output_addr_const (file, XVECEXP (x, 0, 0));
5330 fprintf (file, "@NTPOFF");
5331 return true;
5332 case UNSPEC_GOTNTPOFF:
5333 output_addr_const (file, XVECEXP (x, 0, 0));
5334 fprintf (file, "@GOTNTPOFF");
5335 return true;
5336 case UNSPEC_INDNTPOFF:
5337 output_addr_const (file, XVECEXP (x, 0, 0));
5338 fprintf (file, "@INDNTPOFF");
5339 return true;
5342 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5343 switch (XINT (x, 1))
5345 case UNSPEC_POOL_OFFSET:
5346 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5347 output_addr_const (file, x);
5348 return true;
5350 return false;
5353 /* Output address operand ADDR in assembler syntax to
5354 stdio stream FILE. */
5356 void
5357 print_operand_address (FILE *file, rtx addr)
5359 struct s390_address ad;
5361 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5363 if (!TARGET_Z10)
5365 output_operand_lossage ("symbolic memory references are "
5366 "only supported on z10 or later");
5367 return;
5369 output_addr_const (file, addr);
5370 return;
5373 if (!s390_decompose_address (addr, &ad)
5374 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5375 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5376 output_operand_lossage ("cannot decompose address");
5378 if (ad.disp)
5379 output_addr_const (file, ad.disp);
5380 else
5381 fprintf (file, "0");
5383 if (ad.base && ad.indx)
5384 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5385 reg_names[REGNO (ad.base)]);
5386 else if (ad.base)
5387 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5390 /* Output operand X in assembler syntax to stdio stream FILE.
5391 CODE specified the format flag. The following format flags
5392 are recognized:
5394 'C': print opcode suffix for branch condition.
5395 'D': print opcode suffix for inverse branch condition.
5396 'E': print opcode suffix for branch on index instruction.
5397 'G': print the size of the operand in bytes.
5398 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5399 'M': print the second word of a TImode operand.
5400 'N': print the second word of a DImode operand.
5401 'O': print only the displacement of a memory reference.
5402 'R': print only the base register of a memory reference.
5403 'S': print S-type memory reference (base+displacement).
5404 'Y': print shift count operand.
5406 'b': print integer X as if it's an unsigned byte.
5407 'c': print integer X as if it's a signed byte.
5408 'e': "end" of DImode contiguous bitmask X.
5409 'f': "end" of SImode contiguous bitmask X.
5410 'h': print integer X as if it's a signed halfword.
5411 'i': print the first nonzero HImode part of X.
5412 'j': print the first HImode part unequal to -1 of X.
5413 'k': print the first nonzero SImode part of X.
5414 'm': print the first SImode part unequal to -1 of X.
5415 'o': print integer X as if it's an unsigned 32bit word.
5416 's': "start" of DImode contiguous bitmask X.
5417 't': "start" of SImode contiguous bitmask X.
5418 'x': print integer X as if it's an unsigned halfword.
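/* Illustrative examples: for X == (const_int 0xfffe), modifier 'x'
   prints 65534 while 'h' prints -2; modifier 'b' applied to
   (const_int 0x1ff) prints 255.  */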
5421 void
5422 print_operand (FILE *file, rtx x, int code)
5424 HOST_WIDE_INT ival;
5426 switch (code)
5428 case 'C':
5429 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5430 return;
5432 case 'D':
5433 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5434 return;
5436 case 'E':
5437 if (GET_CODE (x) == LE)
5438 fprintf (file, "l");
5439 else if (GET_CODE (x) == GT)
5440 fprintf (file, "h");
5441 else
5442 output_operand_lossage ("invalid comparison operator "
5443 "for 'E' output modifier");
5444 return;
5446 case 'J':
5447 if (GET_CODE (x) == SYMBOL_REF)
5449 fprintf (file, "%s", ":tls_load:");
5450 output_addr_const (file, x);
5452 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5454 fprintf (file, "%s", ":tls_gdcall:");
5455 output_addr_const (file, XVECEXP (x, 0, 0));
5457 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5459 fprintf (file, "%s", ":tls_ldcall:");
5460 assemble_name (file, get_some_local_dynamic_name ());
5462 else
5463 output_operand_lossage ("invalid reference for 'J' output modifier");
5464 return;
5466 case 'G':
5467 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5468 return;
5470 case 'O':
5472 struct s390_address ad;
5473 int ret;
5475 if (!MEM_P (x))
5477 output_operand_lossage ("memory reference expected for "
5478 "'O' output modifier");
5479 return;
5482 ret = s390_decompose_address (XEXP (x, 0), &ad);
5484 if (!ret
5485 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5486 || ad.indx)
5488 output_operand_lossage ("invalid address for 'O' output modifier");
5489 return;
5492 if (ad.disp)
5493 output_addr_const (file, ad.disp);
5494 else
5495 fprintf (file, "0");
5497 return;
5499 case 'R':
5501 struct s390_address ad;
5502 int ret;
5504 if (!MEM_P (x))
5506 output_operand_lossage ("memory reference expected for "
5507 "'R' output modifier");
5508 return;
5511 ret = s390_decompose_address (XEXP (x, 0), &ad);
5513 if (!ret
5514 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5515 || ad.indx)
5517 output_operand_lossage ("invalid address for 'R' output modifier");
5518 return;
5521 if (ad.base)
5522 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5523 else
5524 fprintf (file, "0");
5526 return;
5528 case 'S':
5530 struct s390_address ad;
5531 int ret;
5533 if (!MEM_P (x))
5535 output_operand_lossage ("memory reference expected for "
5536 "'S' output modifier");
5537 return;
5539 ret = s390_decompose_address (XEXP (x, 0), &ad);
5541 if (!ret
5542 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5543 || ad.indx)
5545 output_operand_lossage ("invalid address for 'S' output modifier");
5546 return;
5549 if (ad.disp)
5550 output_addr_const (file, ad.disp);
5551 else
5552 fprintf (file, "0");
5554 if (ad.base)
5555 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5557 return;
5559 case 'N':
5560 if (GET_CODE (x) == REG)
5561 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5562 else if (GET_CODE (x) == MEM)
5563 x = change_address (x, VOIDmode,
5564 plus_constant (Pmode, XEXP (x, 0), 4));
5565 else
5566 output_operand_lossage ("register or memory expression expected "
5567 "for 'N' output modifier");
5568 break;
5570 case 'M':
5571 if (GET_CODE (x) == REG)
5572 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5573 else if (GET_CODE (x) == MEM)
5574 x = change_address (x, VOIDmode,
5575 plus_constant (Pmode, XEXP (x, 0), 8));
5576 else
5577 output_operand_lossage ("register or memory expression expected "
5578 "for 'M' output modifier");
5579 break;
5581 case 'Y':
5582 print_shift_count_operand (file, x);
5583 return;
5586 switch (GET_CODE (x))
5588 case REG:
5589 fprintf (file, "%s", reg_names[REGNO (x)]);
5590 break;
5592 case MEM:
5593 output_address (XEXP (x, 0));
5594 break;
5596 case CONST:
5597 case CODE_LABEL:
5598 case LABEL_REF:
5599 case SYMBOL_REF:
5600 output_addr_const (file, x);
5601 break;
5603 case CONST_INT:
5604 ival = INTVAL (x);
5605 switch (code)
5607 case 0:
5608 break;
5609 case 'b':
5610 ival &= 0xff;
5611 break;
5612 case 'c':
5613 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5614 break;
5615 case 'x':
5616 ival &= 0xffff;
5617 break;
5618 case 'h':
5619 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5620 break;
5621 case 'i':
5622 ival = s390_extract_part (x, HImode, 0);
5623 break;
5624 case 'j':
5625 ival = s390_extract_part (x, HImode, -1);
5626 break;
5627 case 'k':
5628 ival = s390_extract_part (x, SImode, 0);
5629 break;
5630 case 'm':
5631 ival = s390_extract_part (x, SImode, -1);
5632 break;
5633 case 'o':
5634 ival &= 0xffffffff;
5635 break;
5636 case 'e': case 'f':
5637 case 's': case 't':
5639 int pos, len;
5640 bool ok;
5642 len = (code == 's' || code == 'e' ? 64 : 32);
5643 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5644 gcc_assert (ok);
5645 if (code == 's' || code == 't')
5646 ival = 64 - pos - len;
5647 else
5648 ival = 64 - 1 - pos;
5650 break;
5651 default:
5652 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5654 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5655 break;
5657 case CONST_DOUBLE:
5658 gcc_assert (GET_MODE (x) == VOIDmode);
5659 if (code == 'b')
5660 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5661 else if (code == 'x')
5662 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5663 else if (code == 'h')
5664 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5665 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5666 else
5668 if (code == 0)
5669 output_operand_lossage ("invalid constant - try using "
5670 "an output modifier");
5671 else
5672 output_operand_lossage ("invalid constant for output modifier '%c'",
5673 code);
5675 break;
5677 default:
5678 if (code == 0)
5679 output_operand_lossage ("invalid expression - try using "
5680 "an output modifier");
5681 else
5682 output_operand_lossage ("invalid expression for output "
5683 "modifier '%c'", code);
5684 break;
5688 /* Target hook for assembling integer objects. We need to define it
5689 here to work around a bug in some versions of GAS, which couldn't
5690 handle values smaller than INT_MIN when printed in decimal. */
5692 static bool
5693 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5695 if (size == 8 && aligned_p
5696 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5698 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5699 INTVAL (x));
5700 return true;
5702 return default_assemble_integer (x, size, aligned_p);
5705 /* Returns true if register REGNO is used for forming
5706 a memory address in expression X. */
5708 static bool
5709 reg_used_in_mem_p (int regno, rtx x)
5711 enum rtx_code code = GET_CODE (x);
5712 int i, j;
5713 const char *fmt;
5715 if (code == MEM)
5717 if (refers_to_regno_p (regno, regno+1,
5718 XEXP (x, 0), 0))
5719 return true;
5721 else if (code == SET
5722 && GET_CODE (SET_DEST (x)) == PC)
5724 if (refers_to_regno_p (regno, regno+1,
5725 SET_SRC (x), 0))
5726 return true;
5729 fmt = GET_RTX_FORMAT (code);
5730 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5732 if (fmt[i] == 'e'
5733 && reg_used_in_mem_p (regno, XEXP (x, i)))
5734 return true;
5736 else if (fmt[i] == 'E')
5737 for (j = 0; j < XVECLEN (x, i); j++)
5738 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5739 return true;
5741 return false;
5744 /* Returns true if expression DEP_RTX sets an address register
5745 used by instruction INSN to address memory. */
5747 static bool
5748 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5750 rtx target, pat;
5752 if (NONJUMP_INSN_P (dep_rtx))
5753 dep_rtx = PATTERN (dep_rtx);
5755 if (GET_CODE (dep_rtx) == SET)
5757 target = SET_DEST (dep_rtx);
5758 if (GET_CODE (target) == STRICT_LOW_PART)
5759 target = XEXP (target, 0);
5760 while (GET_CODE (target) == SUBREG)
5761 target = SUBREG_REG (target);
5763 if (GET_CODE (target) == REG)
5765 int regno = REGNO (target);
5767 if (s390_safe_attr_type (insn) == TYPE_LA)
5769 pat = PATTERN (insn);
5770 if (GET_CODE (pat) == PARALLEL)
5772 gcc_assert (XVECLEN (pat, 0) == 2);
5773 pat = XVECEXP (pat, 0, 0);
5775 gcc_assert (GET_CODE (pat) == SET);
5776 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5778 else if (get_attr_atype (insn) == ATYPE_AGEN)
5779 return reg_used_in_mem_p (regno, PATTERN (insn));
5782 return false;
5785 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
5788 s390_agen_dep_p (rtx dep_insn, rtx insn)
5790 rtx dep_rtx = PATTERN (dep_insn);
5791 int i;
5793 if (GET_CODE (dep_rtx) == SET
5794 && addr_generation_dependency_p (dep_rtx, insn))
5795 return 1;
5796 else if (GET_CODE (dep_rtx) == PARALLEL)
5798 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5800 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5801 return 1;
5804 return 0;
5808 /* A C statement (sans semicolon) to update the integer scheduling priority
5809 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5810 reduce the priority to execute INSN later. Do not define this macro if
5811 you do not need to adjust the scheduling priorities of insns.
5813 A STD instruction should be scheduled earlier,
5814 in order to use the bypass. */
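/* Illustrative example: with the boosts below, a floating-point store
   entering the scheduler with priority 4 is raised to 4 << 3 == 32,
   while a general store or STM is raised to 4 << 1 == 8.  */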
5815 static int
5816 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5818 if (! INSN_P (insn))
5819 return priority;
5821 if (s390_tune != PROCESSOR_2084_Z990
5822 && s390_tune != PROCESSOR_2094_Z9_109
5823 && s390_tune != PROCESSOR_2097_Z10
5824 && s390_tune != PROCESSOR_2817_Z196
5825 && s390_tune != PROCESSOR_2827_ZEC12)
5826 return priority;
5828 switch (s390_safe_attr_type (insn))
5830 case TYPE_FSTOREDF:
5831 case TYPE_FSTORESF:
5832 priority = priority << 3;
5833 break;
5834 case TYPE_STORE:
5835 case TYPE_STM:
5836 priority = priority << 1;
5837 break;
5838 default:
5839 break;
5841 return priority;
5845 /* The number of instructions that can be issued per cycle. */
5847 static int
5848 s390_issue_rate (void)
5850 switch (s390_tune)
5852 case PROCESSOR_2084_Z990:
5853 case PROCESSOR_2094_Z9_109:
5854 case PROCESSOR_2817_Z196:
5855 return 3;
5856 case PROCESSOR_2097_Z10:
5857 case PROCESSOR_2827_ZEC12:
5858 return 2;
5859 default:
5860 return 1;
5864 static int
5865 s390_first_cycle_multipass_dfa_lookahead (void)
5867 return 4;
5870 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5871 Fix up MEMs as required. */
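/* Illustrative example: a reference such as (mem (symbol_ref ".LC0"))
   into the literal pool is rewritten below as
   (mem (unspec [(symbol_ref ".LC0") (reg base)] UNSPEC_LTREF)),
   recording the base register through which the pool entry will
   eventually be addressed.  */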
5873 static void
5874 annotate_constant_pool_refs (rtx *x)
5876 int i, j;
5877 const char *fmt;
5879 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5880 || !CONSTANT_POOL_ADDRESS_P (*x));
5882 /* Literal pool references can only occur inside a MEM ... */
5883 if (GET_CODE (*x) == MEM)
5885 rtx memref = XEXP (*x, 0);
5887 if (GET_CODE (memref) == SYMBOL_REF
5888 && CONSTANT_POOL_ADDRESS_P (memref))
5890 rtx base = cfun->machine->base_reg;
5891 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5892 UNSPEC_LTREF);
5894 *x = replace_equiv_address (*x, addr);
5895 return;
5898 if (GET_CODE (memref) == CONST
5899 && GET_CODE (XEXP (memref, 0)) == PLUS
5900 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5901 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5902 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5904 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5905 rtx sym = XEXP (XEXP (memref, 0), 0);
5906 rtx base = cfun->machine->base_reg;
5907 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5908 UNSPEC_LTREF);
5910 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5911 return;
5915 /* ... or a load-address type pattern. */
5916 if (GET_CODE (*x) == SET)
5918 rtx addrref = SET_SRC (*x);
5920 if (GET_CODE (addrref) == SYMBOL_REF
5921 && CONSTANT_POOL_ADDRESS_P (addrref))
5923 rtx base = cfun->machine->base_reg;
5924 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5925 UNSPEC_LTREF);
5927 SET_SRC (*x) = addr;
5928 return;
5931 if (GET_CODE (addrref) == CONST
5932 && GET_CODE (XEXP (addrref, 0)) == PLUS
5933 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5934 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5935 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5937 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5938 rtx sym = XEXP (XEXP (addrref, 0), 0);
5939 rtx base = cfun->machine->base_reg;
5940 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5941 UNSPEC_LTREF);
5943 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5944 return;
5948 /* Annotate LTREL_BASE as well. */
5949 if (GET_CODE (*x) == UNSPEC
5950 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5952 rtx base = cfun->machine->base_reg;
5953 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5954 UNSPEC_LTREL_BASE);
5955 return;
5958 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5959 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5961 if (fmt[i] == 'e')
5963 annotate_constant_pool_refs (&XEXP (*x, i));
5965 else if (fmt[i] == 'E')
5967 for (j = 0; j < XVECLEN (*x, i); j++)
5968 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5973 /* Split all branches that exceed the maximum distance.
5974 Returns true if this created a new literal pool entry. */
5976 static int
5977 s390_split_branches (void)
5979 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5980 int new_literal = 0, ret;
5981 rtx insn, pat, tmp, target;
5982 rtx *label;
5984 /* We need correct insn addresses. */
5986 shorten_branches (get_insns ());
5988 /* Find all branches that exceed 64KB, and split them. */
5990 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5992 if (! JUMP_P (insn))
5993 continue;
5995 pat = PATTERN (insn);
5996 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5997 pat = XVECEXP (pat, 0, 0);
5998 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5999 continue;
6001 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
6003 label = &SET_SRC (pat);
6005 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
6007 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
6008 label = &XEXP (SET_SRC (pat), 1);
6009 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
6010 label = &XEXP (SET_SRC (pat), 2);
6011 else
6012 continue;
6014 else
6015 continue;
6017 if (get_attr_length (insn) <= 4)
6018 continue;
6020 /* We are going to use the return register as a scratch register;
6021 make sure it will be saved/restored by the prologue/epilogue. */
6022 cfun_frame_layout.save_return_addr_p = 1;
6024 if (!flag_pic)
6026 new_literal = 1;
6027 tmp = force_const_mem (Pmode, *label);
6028 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
6029 INSN_ADDRESSES_NEW (tmp, -1);
6030 annotate_constant_pool_refs (&PATTERN (tmp));
6032 target = temp_reg;
6034 else
6036 new_literal = 1;
6037 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6038 UNSPEC_LTREL_OFFSET);
6039 target = gen_rtx_CONST (Pmode, target);
6040 target = force_const_mem (Pmode, target);
6041 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
6042 INSN_ADDRESSES_NEW (tmp, -1);
6043 annotate_constant_pool_refs (&PATTERN (tmp));
6045 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6046 cfun->machine->base_reg),
6047 UNSPEC_LTREL_BASE);
6048 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6051 ret = validate_change (insn, label, target, 0);
6052 gcc_assert (ret);
6055 return new_literal;
6059 /* Find an annotated literal pool symbol referenced in RTX X,
6060 and store it at REF. Will abort if X contains references to
6061 more than one such pool symbol; multiple references to the same
6062 symbol are allowed, however.
6064 The rtx pointed to by REF must be initialized to NULL_RTX
6065 by the caller before calling this routine. */
6067 static void
6068 find_constant_pool_ref (rtx x, rtx *ref)
6070 int i, j;
6071 const char *fmt;
6073 /* Ignore LTREL_BASE references. */
6074 if (GET_CODE (x) == UNSPEC
6075 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6076 return;
6077 /* Likewise POOL_ENTRY insns. */
6078 if (GET_CODE (x) == UNSPEC_VOLATILE
6079 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6080 return;
6082 gcc_assert (GET_CODE (x) != SYMBOL_REF
6083 || !CONSTANT_POOL_ADDRESS_P (x));
6085 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6087 rtx sym = XVECEXP (x, 0, 0);
6088 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6089 && CONSTANT_POOL_ADDRESS_P (sym));
6091 if (*ref == NULL_RTX)
6092 *ref = sym;
6093 else
6094 gcc_assert (*ref == sym);
6096 return;
6099 fmt = GET_RTX_FORMAT (GET_CODE (x));
6100 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6102 if (fmt[i] == 'e')
6104 find_constant_pool_ref (XEXP (x, i), ref);
6106 else if (fmt[i] == 'E')
6108 for (j = 0; j < XVECLEN (x, i); j++)
6109 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6114 /* Replace every reference to the annotated literal pool
6115 symbol REF in X by its base plus OFFSET. */
6117 static void
6118 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6120 int i, j;
6121 const char *fmt;
6123 gcc_assert (*x != ref);
6125 if (GET_CODE (*x) == UNSPEC
6126 && XINT (*x, 1) == UNSPEC_LTREF
6127 && XVECEXP (*x, 0, 0) == ref)
6129 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6130 return;
6133 if (GET_CODE (*x) == PLUS
6134 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6135 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6136 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6137 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6139 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6140 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6141 return;
6144 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6145 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6147 if (fmt[i] == 'e')
6149 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6151 else if (fmt[i] == 'E')
6153 for (j = 0; j < XVECLEN (*x, i); j++)
6154 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6159 /* Check whether X contains an UNSPEC_LTREL_BASE.
6160 Return its constant pool symbol if found, NULL_RTX otherwise. */
6162 static rtx
6163 find_ltrel_base (rtx x)
6165 int i, j;
6166 const char *fmt;
6168 if (GET_CODE (x) == UNSPEC
6169 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6170 return XVECEXP (x, 0, 0);
6172 fmt = GET_RTX_FORMAT (GET_CODE (x));
6173 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6175 if (fmt[i] == 'e')
6177 rtx fnd = find_ltrel_base (XEXP (x, i));
6178 if (fnd)
6179 return fnd;
6181 else if (fmt[i] == 'E')
6183 for (j = 0; j < XVECLEN (x, i); j++)
6185 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6186 if (fnd)
6187 return fnd;
6192 return NULL_RTX;
6195 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6197 static void
6198 replace_ltrel_base (rtx *x)
6200 int i, j;
6201 const char *fmt;
6203 if (GET_CODE (*x) == UNSPEC
6204 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6206 *x = XVECEXP (*x, 0, 1);
6207 return;
6210 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6211 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6213 if (fmt[i] == 'e')
6215 replace_ltrel_base (&XEXP (*x, i));
6217 else if (fmt[i] == 'E')
6219 for (j = 0; j < XVECLEN (*x, i); j++)
6220 replace_ltrel_base (&XVECEXP (*x, i, j));
6226 /* We keep a list of constants which we have to add to internal
6227 constant tables in the middle of large functions. */
6229 #define NR_C_MODES 11
6230 enum machine_mode constant_modes[NR_C_MODES] =
6232 TFmode, TImode, TDmode,
6233 DFmode, DImode, DDmode,
6234 SFmode, SImode, SDmode,
6235 HImode,
6236 QImode
6239 struct constant
6241 struct constant *next;
6242 rtx value;
6243 rtx label;
6246 struct constant_pool
6248 struct constant_pool *next;
6249 rtx first_insn;
6250 rtx pool_insn;
6251 bitmap insns;
6252 rtx emit_pool_after;
6254 struct constant *constants[NR_C_MODES];
6255 struct constant *execute;
6256 rtx label;
6257 int size;
6260 /* Allocate new constant_pool structure. */
6262 static struct constant_pool *
6263 s390_alloc_pool (void)
6265 struct constant_pool *pool;
6266 int i;
6268 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6269 pool->next = NULL;
6270 for (i = 0; i < NR_C_MODES; i++)
6271 pool->constants[i] = NULL;
6273 pool->execute = NULL;
6274 pool->label = gen_label_rtx ();
6275 pool->first_insn = NULL_RTX;
6276 pool->pool_insn = NULL_RTX;
6277 pool->insns = BITMAP_ALLOC (NULL);
6278 pool->size = 0;
6279 pool->emit_pool_after = NULL_RTX;
6281 return pool;
6284 /* Create new constant pool covering instructions starting at INSN
6285 and chain it to the end of POOL_LIST. */
6287 static struct constant_pool *
6288 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6290 struct constant_pool *pool, **prev;
6292 pool = s390_alloc_pool ();
6293 pool->first_insn = insn;
6295 for (prev = pool_list; *prev; prev = &(*prev)->next)
6297 *prev = pool;
6299 return pool;
6302 /* End range of instructions covered by POOL at INSN and emit
6303 placeholder insn representing the pool. */
6305 static void
6306 s390_end_pool (struct constant_pool *pool, rtx insn)
6308 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6310 if (!insn)
6311 insn = get_last_insn ();
6313 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6314 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6317 /* Add INSN to the list of insns covered by POOL. */
6319 static void
6320 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6322 bitmap_set_bit (pool->insns, INSN_UID (insn));
6325 /* Return pool out of POOL_LIST that covers INSN. */
6327 static struct constant_pool *
6328 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6330 struct constant_pool *pool;
6332 for (pool = pool_list; pool; pool = pool->next)
6333 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6334 break;
6336 return pool;
6339 /* Add constant VAL of mode MODE to the constant pool POOL. */
6341 static void
6342 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6344 struct constant *c;
6345 int i;
6347 for (i = 0; i < NR_C_MODES; i++)
6348 if (constant_modes[i] == mode)
6349 break;
6350 gcc_assert (i != NR_C_MODES);
6352 for (c = pool->constants[i]; c != NULL; c = c->next)
6353 if (rtx_equal_p (val, c->value))
6354 break;
6356 if (c == NULL)
6358 c = (struct constant *) xmalloc (sizeof *c);
6359 c->value = val;
6360 c->label = gen_label_rtx ();
6361 c->next = pool->constants[i];
6362 pool->constants[i] = c;
6363 pool->size += GET_MODE_SIZE (mode);
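/* For illustration: adding the same DImode constant twice creates only one
   pool entry (the second call finds the first via rtx_equal_p above), and
   pool->size grows just once, by GET_MODE_SIZE (DImode) == 8 bytes.  */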
6367 /* Return an rtx that represents the offset of X from the start of
6368 pool POOL. */
6370 static rtx
6371 s390_pool_offset (struct constant_pool *pool, rtx x)
6373 rtx label;
6375 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6376 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6377 UNSPEC_POOL_OFFSET);
6378 return gen_rtx_CONST (GET_MODE (x), x);
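/* The result has the shape
     (const (unspec [X (label_ref <pool label>)] UNSPEC_POOL_OFFSET))
   i.e. a constant that is later emitted as the distance of X from the
   pool base label.  */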
6381 /* Find constant VAL of mode MODE in the constant pool POOL.
6382 Return an RTX describing the distance from the start of
6383 the pool to the location of the new constant. */
6385 static rtx
6386 s390_find_constant (struct constant_pool *pool, rtx val,
6387 enum machine_mode mode)
6389 struct constant *c;
6390 int i;
6392 for (i = 0; i < NR_C_MODES; i++)
6393 if (constant_modes[i] == mode)
6394 break;
6395 gcc_assert (i != NR_C_MODES);
6397 for (c = pool->constants[i]; c != NULL; c = c->next)
6398 if (rtx_equal_p (val, c->value))
6399 break;
6401 gcc_assert (c);
6403 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6406 /* Check whether INSN is an execute. Return the label_ref to its
6407 execute target template if so, NULL_RTX otherwise. */
6409 static rtx
6410 s390_execute_label (rtx insn)
6412 if (NONJUMP_INSN_P (insn)
6413 && GET_CODE (PATTERN (insn)) == PARALLEL
6414 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6415 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6416 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6418 return NULL_RTX;
6421 /* Add execute target for INSN to the constant pool POOL. */
6423 static void
6424 s390_add_execute (struct constant_pool *pool, rtx insn)
6426 struct constant *c;
6428 for (c = pool->execute; c != NULL; c = c->next)
6429 if (INSN_UID (insn) == INSN_UID (c->value))
6430 break;
6432 if (c == NULL)
6434 c = (struct constant *) xmalloc (sizeof *c);
6435 c->value = insn;
6436 c->label = gen_label_rtx ();
6437 c->next = pool->execute;
6438 pool->execute = c;
6439 pool->size += 6;
6443 /* Find execute target for INSN in the constant pool POOL.
6444 Return an RTX describing the distance from the start of
6445 the pool to the location of the execute target. */
6447 static rtx
6448 s390_find_execute (struct constant_pool *pool, rtx insn)
6450 struct constant *c;
6452 for (c = pool->execute; c != NULL; c = c->next)
6453 if (INSN_UID (insn) == INSN_UID (c->value))
6454 break;
6456 gcc_assert (c);
6458 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6461 /* For an execute INSN, extract the execute target template. */
6463 static rtx
6464 s390_execute_target (rtx insn)
6466 rtx pattern = PATTERN (insn);
6467 gcc_assert (s390_execute_label (insn));
6469 if (XVECLEN (pattern, 0) == 2)
6471 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6473 else
6475 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6476 int i;
6478 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6479 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6481 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6484 return pattern;
6487 /* Indicate that INSN cannot be duplicated. This is the case for
6488 execute insns that carry a unique label. */
6490 static bool
6491 s390_cannot_copy_insn_p (rtx insn)
6493 rtx label = s390_execute_label (insn);
6494 return label && label != const0_rtx;
6497 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6498 do not emit the pool base label. */
6500 static void
6501 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6503 struct constant *c;
6504 rtx insn = pool->pool_insn;
6505 int i;
6507 /* Switch to rodata section. */
6508 if (TARGET_CPU_ZARCH)
6510 insn = emit_insn_after (gen_pool_section_start (), insn);
6511 INSN_ADDRESSES_NEW (insn, -1);
6514 /* Ensure minimum pool alignment. */
6515 if (TARGET_CPU_ZARCH)
6516 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6517 else
6518 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6519 INSN_ADDRESSES_NEW (insn, -1);
6521 /* Emit pool base label. */
6522 if (!remote_label)
6524 insn = emit_label_after (pool->label, insn);
6525 INSN_ADDRESSES_NEW (insn, -1);
6528 /* Dump constants in descending alignment requirement order,
6529 ensuring proper alignment for every constant. */
6530 for (i = 0; i < NR_C_MODES; i++)
6531 for (c = pool->constants[i]; c; c = c->next)
6533 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6534 rtx value = copy_rtx (c->value);
6535 if (GET_CODE (value) == CONST
6536 && GET_CODE (XEXP (value, 0)) == UNSPEC
6537 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6538 && XVECLEN (XEXP (value, 0), 0) == 1)
6539 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6541 insn = emit_label_after (c->label, insn);
6542 INSN_ADDRESSES_NEW (insn, -1);
6544 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6545 gen_rtvec (1, value),
6546 UNSPECV_POOL_ENTRY);
6547 insn = emit_insn_after (value, insn);
6548 INSN_ADDRESSES_NEW (insn, -1);
6551 /* Ensure minimum alignment for instructions. */
6552 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6553 INSN_ADDRESSES_NEW (insn, -1);
6555 /* Output in-pool execute template insns. */
6556 for (c = pool->execute; c; c = c->next)
6558 insn = emit_label_after (c->label, insn);
6559 INSN_ADDRESSES_NEW (insn, -1);
6561 insn = emit_insn_after (s390_execute_target (c->value), insn);
6562 INSN_ADDRESSES_NEW (insn, -1);
6565 /* Switch back to previous section. */
6566 if (TARGET_CPU_ZARCH)
6568 insn = emit_insn_after (gen_pool_section_end (), insn);
6569 INSN_ADDRESSES_NEW (insn, -1);
6572 insn = emit_barrier_after (insn);
6573 INSN_ADDRESSES_NEW (insn, -1);
6575 /* Remove placeholder insn. */
6576 remove_insn (pool->pool_insn);
6579 /* Free all memory used by POOL. */
6581 static void
6582 s390_free_pool (struct constant_pool *pool)
6584 struct constant *c, *next;
6585 int i;
6587 for (i = 0; i < NR_C_MODES; i++)
6588 for (c = pool->constants[i]; c; c = next)
6590 next = c->next;
6591 free (c);
6594 for (c = pool->execute; c; c = next)
6596 next = c->next;
6597 free (c);
6600 BITMAP_FREE (pool->insns);
6601 free (pool);
6605 /* Collect main literal pool. Return NULL on overflow. */
6607 static struct constant_pool *
6608 s390_mainpool_start (void)
6610 struct constant_pool *pool;
6611 rtx insn;
6613 pool = s390_alloc_pool ();
6615 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6617 if (NONJUMP_INSN_P (insn)
6618 && GET_CODE (PATTERN (insn)) == SET
6619 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6620 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6622 gcc_assert (!pool->pool_insn);
6623 pool->pool_insn = insn;
6626 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6628 s390_add_execute (pool, insn);
6630 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6632 rtx pool_ref = NULL_RTX;
6633 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6634 if (pool_ref)
6636 rtx constant = get_pool_constant (pool_ref);
6637 enum machine_mode mode = get_pool_mode (pool_ref);
6638 s390_add_constant (pool, constant, mode);
6642 /* If hot/cold partitioning is enabled we have to make sure that
6643 the literal pool is emitted in the same section where the
6644 initialization of the literal pool base pointer takes place.
6645 emit_pool_after is only used in the non-overflow case on
6646 non-z CPUs where we can emit the literal pool at the end of the
6647 function body within the text section. */
6648 if (NOTE_P (insn)
6649 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6650 && !pool->emit_pool_after)
6651 pool->emit_pool_after = PREV_INSN (insn);
6654 gcc_assert (pool->pool_insn || pool->size == 0);
6656 if (pool->size >= 4096)
6658 /* We're going to chunkify the pool, so remove the main
6659 pool placeholder insn. */
6660 remove_insn (pool->pool_insn);
6662 s390_free_pool (pool);
6663 pool = NULL;
6666 /* If the function ends with the section where the literal pool
6667 should be emitted, set the marker to its end. */
6668 if (pool && !pool->emit_pool_after)
6669 pool->emit_pool_after = get_last_insn ();
6671 return pool;
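/* Note on the 4096-byte limit above: every pool entry must be addressable
   from the pool base register using the short 12-bit unsigned displacement
   (0..4095), so once the accumulated pool size reaches 4096 bytes a single
   main pool can no longer work and we fall back to the chunkified pools
   built by s390_chunkify_start below.  */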
6674 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6675 Modify the current function to output the pool constants as well as
6676 the pool register setup instruction. */
6678 static void
6679 s390_mainpool_finish (struct constant_pool *pool)
6681 rtx base_reg = cfun->machine->base_reg;
6682 rtx insn;
6684 /* If the pool is empty, we're done. */
6685 if (pool->size == 0)
6687 /* We don't actually need a base register after all. */
6688 cfun->machine->base_reg = NULL_RTX;
6690 if (pool->pool_insn)
6691 remove_insn (pool->pool_insn);
6692 s390_free_pool (pool);
6693 return;
6696 /* We need correct insn addresses. */
6697 shorten_branches (get_insns ());
6699 /* On zSeries, we use a LARL to load the pool register. The pool is
6700 located in the .rodata section, so we emit it after the function. */
6701 if (TARGET_CPU_ZARCH)
6703 insn = gen_main_base_64 (base_reg, pool->label);
6704 insn = emit_insn_after (insn, pool->pool_insn);
6705 INSN_ADDRESSES_NEW (insn, -1);
6706 remove_insn (pool->pool_insn);
6708 insn = get_last_insn ();
6709 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6710 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6712 s390_dump_pool (pool, 0);
6715 /* On S/390, if the total size of the function's code plus literal pool
6716 does not exceed 4096 bytes, we use BASR to set up a function base
6717 pointer, and emit the literal pool at the end of the function. */
6718 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6719 + pool->size + 8 /* alignment slop */ < 4096)
6721 insn = gen_main_base_31_small (base_reg, pool->label);
6722 insn = emit_insn_after (insn, pool->pool_insn);
6723 INSN_ADDRESSES_NEW (insn, -1);
6724 remove_insn (pool->pool_insn);
6726 insn = emit_label_after (pool->label, insn);
6727 INSN_ADDRESSES_NEW (insn, -1);
6729 /* emit_pool_after will be set by s390_mainpool_start to the
6730 last insn of the section where the literal pool should be
6731 emitted. */
6732 insn = pool->emit_pool_after;
6734 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6735 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6737 s390_dump_pool (pool, 1);
6740 /* Otherwise, we emit an inline literal pool and use BASR to branch
6741 over it, setting up the pool register at the same time. */
6742 else
6744 rtx pool_end = gen_label_rtx ();
6746 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6747 insn = emit_jump_insn_after (insn, pool->pool_insn);
6748 JUMP_LABEL (insn) = pool_end;
6749 INSN_ADDRESSES_NEW (insn, -1);
6750 remove_insn (pool->pool_insn);
6752 insn = emit_label_after (pool->label, insn);
6753 INSN_ADDRESSES_NEW (insn, -1);
6755 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6756 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6758 insn = emit_label_after (pool_end, pool->pool_insn);
6759 INSN_ADDRESSES_NEW (insn, -1);
6761 s390_dump_pool (pool, 1);
6765 /* Replace all literal pool references. */
6767 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6769 if (INSN_P (insn))
6770 replace_ltrel_base (&PATTERN (insn));
6772 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6774 rtx addr, pool_ref = NULL_RTX;
6775 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6776 if (pool_ref)
6778 if (s390_execute_label (insn))
6779 addr = s390_find_execute (pool, insn);
6780 else
6781 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6782 get_pool_mode (pool_ref));
6784 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6785 INSN_CODE (insn) = -1;
6791 /* Free the pool. */
6792 s390_free_pool (pool);
6795 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6796 We have decided we cannot use this pool, so revert all changes
6797 to the current function that were done by s390_mainpool_start. */
6798 static void
6799 s390_mainpool_cancel (struct constant_pool *pool)
6801 /* We didn't actually change the instruction stream, so simply
6802 free the pool memory. */
6803 s390_free_pool (pool);
6807 /* Chunkify the literal pool. */
6809 #define S390_POOL_CHUNK_MIN 0xc00
6810 #define S390_POOL_CHUNK_MAX 0xe00
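/* Roughly: on 31 bit a chunk is ended at the next convenient BARRIER once
   it passes S390_POOL_CHUNK_MIN (0xc00 = 3072 bytes) and is forcibly ended
   (by inserting a jump around the pool) once it passes S390_POOL_CHUNK_MAX
   (0xe00 = 3584 bytes); on zarch only the MAX limit is used.  Both stay
   well below the 4096-byte displacement range, leaving headroom for the
   8 bytes of alignment slop and the base-reload insns counted in
   extra_size.  */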
6812 static struct constant_pool *
6813 s390_chunkify_start (void)
6815 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6816 int extra_size = 0;
6817 bitmap far_labels;
6818 rtx pending_ltrel = NULL_RTX;
6819 rtx insn;
6821 rtx (*gen_reload_base) (rtx, rtx) =
6822 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6825 /* We need correct insn addresses. */
6827 shorten_branches (get_insns ());
6829 /* Scan all insns and move literals to pool chunks. */
6831 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6833 bool section_switch_p = false;
6835 /* Check for pending LTREL_BASE. */
6836 if (INSN_P (insn))
6838 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6839 if (ltrel_base)
6841 gcc_assert (ltrel_base == pending_ltrel);
6842 pending_ltrel = NULL_RTX;
6846 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6848 if (!curr_pool)
6849 curr_pool = s390_start_pool (&pool_list, insn);
6851 s390_add_execute (curr_pool, insn);
6852 s390_add_pool_insn (curr_pool, insn);
6854 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6856 rtx pool_ref = NULL_RTX;
6857 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6858 if (pool_ref)
6860 rtx constant = get_pool_constant (pool_ref);
6861 enum machine_mode mode = get_pool_mode (pool_ref);
6863 if (!curr_pool)
6864 curr_pool = s390_start_pool (&pool_list, insn);
6866 s390_add_constant (curr_pool, constant, mode);
6867 s390_add_pool_insn (curr_pool, insn);
6869 /* Don't split the pool chunk between a LTREL_OFFSET load
6870 and the corresponding LTREL_BASE. */
6871 if (GET_CODE (constant) == CONST
6872 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6873 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6875 gcc_assert (!pending_ltrel);
6876 pending_ltrel = pool_ref;
6881 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
6883 if (curr_pool)
6884 s390_add_pool_insn (curr_pool, insn);
6885 /* An LTREL_BASE must follow within the same basic block. */
6886 gcc_assert (!pending_ltrel);
6889 if (NOTE_P (insn))
6890 switch (NOTE_KIND (insn))
6892 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6893 section_switch_p = true;
6894 break;
6895 case NOTE_INSN_VAR_LOCATION:
6896 case NOTE_INSN_CALL_ARG_LOCATION:
6897 continue;
6898 default:
6899 break;
6902 if (!curr_pool
6903 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6904 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6905 continue;
6907 if (TARGET_CPU_ZARCH)
6909 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6910 continue;
6912 s390_end_pool (curr_pool, NULL_RTX);
6913 curr_pool = NULL;
6915 else
6917 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6918 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6919 + extra_size;
6921 /* We will later have to insert base register reload insns.
6922 Those will have an effect on code size, which we need to
6923 consider here. This calculation makes rather pessimistic
6924 worst-case assumptions. */
6925 if (LABEL_P (insn))
6926 extra_size += 6;
6928 if (chunk_size < S390_POOL_CHUNK_MIN
6929 && curr_pool->size < S390_POOL_CHUNK_MIN
6930 && !section_switch_p)
6931 continue;
6933 /* Pool chunks can only be inserted after BARRIERs ... */
6934 if (BARRIER_P (insn))
6936 s390_end_pool (curr_pool, insn);
6937 curr_pool = NULL;
6938 extra_size = 0;
6941 /* ... so if we don't find one in time, create one. */
6942 else if (chunk_size > S390_POOL_CHUNK_MAX
6943 || curr_pool->size > S390_POOL_CHUNK_MAX
6944 || section_switch_p)
6946 rtx label, jump, barrier, next, prev;
6948 if (!section_switch_p)
6950 /* We can insert the barrier only after a 'real' insn. */
6951 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
6952 continue;
6953 if (get_attr_length (insn) == 0)
6954 continue;
6955 /* Don't separate LTREL_BASE from the corresponding
6956 LTREL_OFFSET load. */
6957 if (pending_ltrel)
6958 continue;
6959 next = insn;
6962 insn = next;
6963 next = NEXT_INSN (insn);
6965 while (next
6966 && NOTE_P (next)
6967 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6968 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6970 else
6972 gcc_assert (!pending_ltrel);
6974 /* The old pool has to end before the section switch
6975 note in order to make it part of the current
6976 section. */
6977 insn = PREV_INSN (insn);
6980 label = gen_label_rtx ();
6981 prev = insn;
6982 if (prev && NOTE_P (prev))
6983 prev = prev_nonnote_insn (prev);
6984 if (prev)
6985 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6986 INSN_LOCATION (prev));
6987 else
6988 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6989 barrier = emit_barrier_after (jump);
6990 insn = emit_label_after (label, barrier);
6991 JUMP_LABEL (jump) = label;
6992 LABEL_NUSES (label) = 1;
6994 INSN_ADDRESSES_NEW (jump, -1);
6995 INSN_ADDRESSES_NEW (barrier, -1);
6996 INSN_ADDRESSES_NEW (insn, -1);
6998 s390_end_pool (curr_pool, barrier);
6999 curr_pool = NULL;
7000 extra_size = 0;
7005 if (curr_pool)
7006 s390_end_pool (curr_pool, NULL_RTX);
7007 gcc_assert (!pending_ltrel);
7009 /* Find all labels that are branched into
7010 from an insn belonging to a different chunk. */
7012 far_labels = BITMAP_ALLOC (NULL);
7014 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7016 /* Labels marked with LABEL_PRESERVE_P can be the target
7017 of non-local jumps, so we have to mark them.
7018 The same holds for named labels.
7020 Don't do that, however, if it is the label before
7021 a jump table. */
7023 if (LABEL_P (insn)
7024 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7026 rtx vec_insn = next_real_insn (insn);
7027 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
7028 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7031 /* If we have a direct jump (conditional or unconditional)
7032 or a casesi jump, check all potential targets. */
7033 else if (JUMP_P (insn))
7035 rtx pat = PATTERN (insn);
7036 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
7037 pat = XVECEXP (pat, 0, 0);
7039 if (GET_CODE (pat) == SET)
7041 rtx label = JUMP_LABEL (insn);
7042 if (label)
7044 if (s390_find_pool (pool_list, label)
7045 != s390_find_pool (pool_list, insn))
7046 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7049 else if (GET_CODE (pat) == PARALLEL
7050 && XVECLEN (pat, 0) == 2
7051 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
7052 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
7053 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
7055 /* Find the jump table used by this casesi jump. */
7056 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
7057 rtx vec_insn = next_real_insn (vec_label);
7058 if (vec_insn && JUMP_TABLE_DATA_P (vec_insn))
7060 rtx vec_pat = PATTERN (vec_insn);
7061 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7063 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7065 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7067 if (s390_find_pool (pool_list, label)
7068 != s390_find_pool (pool_list, insn))
7069 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7076 /* Insert base register reload insns before every pool. */
7078 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7080 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7081 curr_pool->label);
7082 rtx insn = curr_pool->first_insn;
7083 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7086 /* Insert base register reload insns at every far label. */
7088 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7089 if (LABEL_P (insn)
7090 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7092 struct constant_pool *pool = s390_find_pool (pool_list, insn);
7093 if (pool)
7095 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7096 pool->label);
7097 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7102 BITMAP_FREE (far_labels);
7105 /* Recompute insn addresses. */
7107 init_insn_lengths ();
7108 shorten_branches (get_insns ());
7110 return pool_list;
7113 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7114 After we have decided to use this list, finish implementing
7115 all changes to the current function as required. */
7117 static void
7118 s390_chunkify_finish (struct constant_pool *pool_list)
7120 struct constant_pool *curr_pool = NULL;
7121 rtx insn;
7124 /* Replace all literal pool references. */
7126 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7128 if (INSN_P (insn))
7129 replace_ltrel_base (&PATTERN (insn));
7131 curr_pool = s390_find_pool (pool_list, insn);
7132 if (!curr_pool)
7133 continue;
7135 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
7137 rtx addr, pool_ref = NULL_RTX;
7138 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7139 if (pool_ref)
7141 if (s390_execute_label (insn))
7142 addr = s390_find_execute (curr_pool, insn);
7143 else
7144 addr = s390_find_constant (curr_pool,
7145 get_pool_constant (pool_ref),
7146 get_pool_mode (pool_ref));
7148 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7149 INSN_CODE (insn) = -1;
7154 /* Dump out all literal pools. */
7156 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7157 s390_dump_pool (curr_pool, 0);
7159 /* Free pool list. */
7161 while (pool_list)
7163 struct constant_pool *next = pool_list->next;
7164 s390_free_pool (pool_list);
7165 pool_list = next;
7169 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7170 We have decided we cannot use this list, so revert all changes
7171 to the current function that were done by s390_chunkify_start. */
7173 static void
7174 s390_chunkify_cancel (struct constant_pool *pool_list)
7176 struct constant_pool *curr_pool = NULL;
7177 rtx insn;
7179 /* Remove all pool placeholder insns. */
7181 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7183 /* Did we insert an extra barrier? Remove it. */
7184 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7185 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7186 rtx label = NEXT_INSN (curr_pool->pool_insn);
7188 if (jump && JUMP_P (jump)
7189 && barrier && BARRIER_P (barrier)
7190 && label && LABEL_P (label)
7191 && GET_CODE (PATTERN (jump)) == SET
7192 && SET_DEST (PATTERN (jump)) == pc_rtx
7193 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7194 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7196 remove_insn (jump);
7197 remove_insn (barrier);
7198 remove_insn (label);
7201 remove_insn (curr_pool->pool_insn);
7204 /* Remove all base register reload insns. */
7206 for (insn = get_insns (); insn; )
7208 rtx next_insn = NEXT_INSN (insn);
7210 if (NONJUMP_INSN_P (insn)
7211 && GET_CODE (PATTERN (insn)) == SET
7212 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7213 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7214 remove_insn (insn);
7216 insn = next_insn;
7219 /* Free pool list. */
7221 while (pool_list)
7223 struct constant_pool *next = pool_list->next;
7224 s390_free_pool (pool_list);
7225 pool_list = next;
7229 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7231 void
7232 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7234 REAL_VALUE_TYPE r;
7236 switch (GET_MODE_CLASS (mode))
7238 case MODE_FLOAT:
7239 case MODE_DECIMAL_FLOAT:
7240 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7242 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7243 assemble_real (r, mode, align);
7244 break;
7246 case MODE_INT:
7247 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7248 mark_symbol_refs_as_used (exp);
7249 break;
7251 default:
7252 gcc_unreachable ();
7257 /* Return an RTL expression representing the value of the return address
7258 for the frame COUNT steps up from the current frame. FRAME is the
7259 frame pointer of that frame. */
7262 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7264 int offset;
7265 rtx addr;
7267 /* Without backchain, we fail for all but the current frame. */
7269 if (!TARGET_BACKCHAIN && count > 0)
7270 return NULL_RTX;
7272 /* For the current frame, we need to make sure the initial
7273 value of RETURN_REGNUM is actually saved. */
7275 if (count == 0)
7277 /* On non-z architectures branch splitting could overwrite r14. */
7278 if (TARGET_CPU_ZARCH)
7279 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7280 else
7282 cfun_frame_layout.save_return_addr_p = true;
7283 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7287 if (TARGET_PACKED_STACK)
7288 offset = -2 * UNITS_PER_LONG;
7289 else
7290 offset = RETURN_REGNUM * UNITS_PER_LONG;
7292 addr = plus_constant (Pmode, frame, offset);
7293 addr = memory_address (Pmode, addr);
7294 return gen_rtx_MEM (Pmode, addr);
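/* For illustration, with the usual 64-bit values UNITS_PER_LONG == 8 and
   RETURN_REGNUM == 14: in the default (non-packed) layout the parent
   frame's return address is read from FRAME + 112, i.e. the r14 save slot
   of that frame's register save area; with -mpacked-stack it is read from
   FRAME - 16.  */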
7297 /* Return an RTL expression representing the back chain stored in
7298 the current stack frame. */
7301 s390_back_chain_rtx (void)
7303 rtx chain;
7305 gcc_assert (TARGET_BACKCHAIN);
7307 if (TARGET_PACKED_STACK)
7308 chain = plus_constant (Pmode, stack_pointer_rtx,
7309 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7310 else
7311 chain = stack_pointer_rtx;
7313 chain = gen_rtx_MEM (Pmode, chain);
7314 return chain;
7317 /* Find the first call-clobbered register unused in a function.
7318 This could be used as a base register in a leaf function
7319 or for holding the return address before the epilogue. */
7321 static int
7322 find_unused_clobbered_reg (void)
7324 int i;
7325 for (i = 0; i < 6; i++)
7326 if (!df_regs_ever_live_p (i))
7327 return i;
7328 return 0;
7332 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7333 clobbered hard regs in SETREG. */
7335 static void
7336 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7338 int *regs_ever_clobbered = (int *)data;
7339 unsigned int i, regno;
7340 enum machine_mode mode = GET_MODE (setreg);
7342 if (GET_CODE (setreg) == SUBREG)
7344 rtx inner = SUBREG_REG (setreg);
7345 if (!GENERAL_REG_P (inner))
7346 return;
7347 regno = subreg_regno (setreg);
7349 else if (GENERAL_REG_P (setreg))
7350 regno = REGNO (setreg);
7351 else
7352 return;
7354 for (i = regno;
7355 i < regno + HARD_REGNO_NREGS (regno, mode);
7356 i++)
7357 regs_ever_clobbered[i] = 1;
7360 /* Walks through all basic blocks of the current function looking
7361 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7362 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7363 each of those regs. */
7365 static void
7366 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7368 basic_block cur_bb;
7369 rtx cur_insn;
7370 unsigned int i;
7372 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7374 /* For non-leaf functions we have to consider all call clobbered regs to be
7375 clobbered. */
7376 if (!crtl->is_leaf)
7378 for (i = 0; i < 16; i++)
7379 regs_ever_clobbered[i] = call_really_used_regs[i];
7382 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7383 this work is done by liveness analysis (mark_regs_live_at_end).
7384 Special care is needed for functions containing landing pads. Landing pads
7385 may use the eh registers, but the code which sets these registers is not
7386 contained in that function. Hence s390_regs_ever_clobbered is not able to
7387 deal with this automatically. */
7388 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7389 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7390 if (crtl->calls_eh_return
7391 || (cfun->machine->has_landing_pad_p
7392 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7393 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7395 /* For nonlocal gotos all call-saved registers have to be saved.
7396 This flag is also set for the unwinding code in libgcc.
7397 See expand_builtin_unwind_init. For regs_ever_live this is done by
7398 reload. */
7399 if (cfun->has_nonlocal_label)
7400 for (i = 0; i < 16; i++)
7401 if (!call_really_used_regs[i])
7402 regs_ever_clobbered[i] = 1;
7404 FOR_EACH_BB (cur_bb)
7406 FOR_BB_INSNS (cur_bb, cur_insn)
7408 if (INSN_P (cur_insn))
7409 note_stores (PATTERN (cur_insn),
7410 s390_reg_clobbered_rtx,
7411 regs_ever_clobbered);
7416 /* Determine the frame area which actually has to be accessed
7417 in the function epilogue. The values are stored at the
7418 given pointers AREA_BOTTOM (address of the lowest used stack
7419 address) and AREA_TOP (address of the first item which does
7420 not belong to the stack frame). */
7422 static void
7423 s390_frame_area (int *area_bottom, int *area_top)
7425 int b, t;
7426 int i;
7428 b = INT_MAX;
7429 t = INT_MIN;
7431 if (cfun_frame_layout.first_restore_gpr != -1)
7433 b = (cfun_frame_layout.gprs_offset
7434 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7435 t = b + (cfun_frame_layout.last_restore_gpr
7436 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7439 if (TARGET_64BIT && cfun_save_high_fprs_p)
7441 b = MIN (b, cfun_frame_layout.f8_offset);
7442 t = MAX (t, (cfun_frame_layout.f8_offset
7443 + cfun_frame_layout.high_fprs * 8));
7446 if (!TARGET_64BIT)
7447 for (i = 2; i < 4; i++)
7448 if (cfun_fpr_bit_p (i))
7450 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7451 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7454 *area_bottom = b;
7455 *area_top = t;
7458 /* Fill cfun->machine with info about register usage of current function.
7459 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7461 static void
7462 s390_register_info (int clobbered_regs[])
7464 int i, j;
7466 /* FPRs 8-15 are call-saved in the 64-bit ABI. */
7467 cfun_frame_layout.fpr_bitmap = 0;
7468 cfun_frame_layout.high_fprs = 0;
7469 if (TARGET_64BIT)
7470 for (i = 24; i < 32; i++)
7471 if (df_regs_ever_live_p (i) && !global_regs[i])
7473 cfun_set_fpr_bit (i - 16);
7474 cfun_frame_layout.high_fprs++;
7477 /* Find first and last gpr to be saved. We trust regs_ever_live
7478 data, except that we don't save and restore global registers.
7480 Also, all registers with special meaning to the compiler need
7481 to be handled specially. */
7483 s390_regs_ever_clobbered (clobbered_regs);
7485 for (i = 0; i < 16; i++)
7486 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7488 if (frame_pointer_needed)
7489 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7491 if (flag_pic)
7492 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7493 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7495 clobbered_regs[BASE_REGNUM]
7496 |= (cfun->machine->base_reg
7497 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7499 clobbered_regs[RETURN_REGNUM]
7500 |= (!crtl->is_leaf
7501 || TARGET_TPF_PROFILING
7502 || cfun->machine->split_branches_pending_p
7503 || cfun_frame_layout.save_return_addr_p
7504 || crtl->calls_eh_return
7505 || cfun->stdarg);
7507 clobbered_regs[STACK_POINTER_REGNUM]
7508 |= (!crtl->is_leaf
7509 || TARGET_TPF_PROFILING
7510 || cfun_save_high_fprs_p
7511 || get_frame_size () > 0
7512 || cfun->calls_alloca
7513 || cfun->stdarg);
7515 for (i = 6; i < 16; i++)
7516 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7517 break;
7518 for (j = 15; j > i; j--)
7519 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7520 break;
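/* Here i is the lowest and j the highest register in 6..15 that is live
   or clobbered (i == 16 if there is none); they delimit the contiguous
   slot range handled by a single store/load multiple below.  */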
7522 if (i == 16)
7524 /* Nothing to save/restore. */
7525 cfun_frame_layout.first_save_gpr_slot = -1;
7526 cfun_frame_layout.last_save_gpr_slot = -1;
7527 cfun_frame_layout.first_save_gpr = -1;
7528 cfun_frame_layout.first_restore_gpr = -1;
7529 cfun_frame_layout.last_save_gpr = -1;
7530 cfun_frame_layout.last_restore_gpr = -1;
7532 else
7534 /* Save slots for gprs from i to j. */
7535 cfun_frame_layout.first_save_gpr_slot = i;
7536 cfun_frame_layout.last_save_gpr_slot = j;
7538 for (i = cfun_frame_layout.first_save_gpr_slot;
7539 i < cfun_frame_layout.last_save_gpr_slot + 1;
7540 i++)
7541 if (clobbered_regs[i])
7542 break;
7544 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7545 if (clobbered_regs[j])
7546 break;
7548 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7550 /* Nothing to save/restore. */
7551 cfun_frame_layout.first_save_gpr = -1;
7552 cfun_frame_layout.first_restore_gpr = -1;
7553 cfun_frame_layout.last_save_gpr = -1;
7554 cfun_frame_layout.last_restore_gpr = -1;
7556 else
7558 /* Save / Restore from gpr i to j. */
7559 cfun_frame_layout.first_save_gpr = i;
7560 cfun_frame_layout.first_restore_gpr = i;
7561 cfun_frame_layout.last_save_gpr = j;
7562 cfun_frame_layout.last_restore_gpr = j;
7566 if (cfun->stdarg)
7568 /* Varargs functions need to save gprs 2 to 6. */
7569 if (cfun->va_list_gpr_size
7570 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7572 int min_gpr = crtl->args.info.gprs;
7573 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7574 if (max_gpr > GP_ARG_NUM_REG)
7575 max_gpr = GP_ARG_NUM_REG;
7577 if (cfun_frame_layout.first_save_gpr == -1
7578 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7580 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7581 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7584 if (cfun_frame_layout.last_save_gpr == -1
7585 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7587 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7588 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7592 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7593 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7594 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7596 int min_fpr = crtl->args.info.fprs;
7597 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7598 if (max_fpr > FP_ARG_NUM_REG)
7599 max_fpr = FP_ARG_NUM_REG;
7601 /* ??? This is currently required to ensure proper location
7602 of the fpr save slots within the va_list save area. */
7603 if (TARGET_PACKED_STACK)
7604 min_fpr = 0;
7606 for (i = min_fpr; i < max_fpr; i++)
7607 cfun_set_fpr_bit (i);
7611 if (!TARGET_64BIT)
7612 for (i = 2; i < 4; i++)
7613 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7614 cfun_set_fpr_bit (i);
7617 /* Fill cfun->machine with info about frame of current function. */
7619 static void
7620 s390_frame_info (void)
7622 int i;
7624 cfun_frame_layout.frame_size = get_frame_size ();
7625 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7626 fatal_error ("total size of local variables exceeds architecture limit");
7628 if (!TARGET_PACKED_STACK)
7630 cfun_frame_layout.backchain_offset = 0;
7631 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7632 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7633 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7634 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7635 * UNITS_PER_LONG);
7637 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7639 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7640 - UNITS_PER_LONG);
7641 cfun_frame_layout.gprs_offset
7642 = (cfun_frame_layout.backchain_offset
7643 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7644 * UNITS_PER_LONG);
7646 if (TARGET_64BIT)
7648 cfun_frame_layout.f4_offset
7649 = (cfun_frame_layout.gprs_offset
7650 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7652 cfun_frame_layout.f0_offset
7653 = (cfun_frame_layout.f4_offset
7654 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7656 else
7658 /* On 31 bit we have to take care of the alignment of the
7659 floating point regs to provide fastest access. */
7660 cfun_frame_layout.f0_offset
7661 = ((cfun_frame_layout.gprs_offset
7662 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7663 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7665 cfun_frame_layout.f4_offset
7666 = (cfun_frame_layout.f0_offset
7667 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7670 else /* no backchain */
7672 cfun_frame_layout.f4_offset
7673 = (STACK_POINTER_OFFSET
7674 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7676 cfun_frame_layout.f0_offset
7677 = (cfun_frame_layout.f4_offset
7678 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7680 cfun_frame_layout.gprs_offset
7681 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7684 if (crtl->is_leaf
7685 && !TARGET_TPF_PROFILING
7686 && cfun_frame_layout.frame_size == 0
7687 && !cfun_save_high_fprs_p
7688 && !cfun->calls_alloca
7689 && !cfun->stdarg)
7690 return;
7692 if (!TARGET_PACKED_STACK)
7693 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7694 + crtl->outgoing_args_size
7695 + cfun_frame_layout.high_fprs * 8);
7696 else
7698 if (TARGET_BACKCHAIN)
7699 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7701 /* No alignment trouble here because f8-f15 are only saved under
7702 64 bit. */
7703 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7704 cfun_frame_layout.f4_offset),
7705 cfun_frame_layout.gprs_offset)
7706 - cfun_frame_layout.high_fprs * 8);
7708 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7710 for (i = 0; i < 8; i++)
7711 if (cfun_fpr_bit_p (i))
7712 cfun_frame_layout.frame_size += 8;
7714 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7716 /* If, under 31 bit, an odd number of gprs has to be saved, we have to adjust
7717 the frame size to sustain 8-byte alignment of stack frames. */
7718 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7719 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7720 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7722 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
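/* Worked example for the default (non-packed) 64-bit layout, assuming the
   first GPR save slot is 6: gprs_offset = 6 * 8 = 48, f0_offset = 16 * 8
   = 128 and f4_offset = 128 + 16 = 144, all relative to the incoming
   stack pointer, consistent with the r6..r15 and f0/f2/f4/f6 save slots
   of the usual 160-byte s390x stack frame.  */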
7726 /* Generate frame layout. Fills in register and frame data for the current
7727 function in cfun->machine. This routine can be called multiple times;
7728 it will re-do the complete frame layout every time. */
7730 static void
7731 s390_init_frame_layout (void)
7733 HOST_WIDE_INT frame_size;
7734 int base_used;
7735 int clobbered_regs[16];
7737 /* On S/390 machines, we may need to perform branch splitting, which
7738 will require both the base and the return address register. We have no
7739 choice but to assume we're going to need them until right at the
7740 end of the machine dependent reorg phase. */
7741 if (!TARGET_CPU_ZARCH)
7742 cfun->machine->split_branches_pending_p = true;
7746 frame_size = cfun_frame_layout.frame_size;
7748 /* Try to predict whether we'll need the base register. */
7749 base_used = cfun->machine->split_branches_pending_p
7750 || crtl->uses_const_pool
7751 || (!DISP_IN_RANGE (frame_size)
7752 && !CONST_OK_FOR_K (frame_size));
7754 /* Decide which register to use as literal pool base. In small
7755 leaf functions, try to use an unused call-clobbered register
7756 as base register to avoid save/restore overhead. */
7757 if (!base_used)
7758 cfun->machine->base_reg = NULL_RTX;
7759 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7760 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7761 else
7762 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7764 s390_register_info (clobbered_regs);
7765 s390_frame_info ();
7767 while (frame_size != cfun_frame_layout.frame_size);
7770 /* Update frame layout. Recompute actual register save data based on
7771 current info and update regs_ever_live for the special registers.
7772 May be called multiple times, but may never cause *more* registers
7773 to be saved than s390_init_frame_layout allocated room for. */
7775 static void
7776 s390_update_frame_layout (void)
7778 int clobbered_regs[16];
7780 s390_register_info (clobbered_regs);
7782 df_set_regs_ever_live (BASE_REGNUM,
7783 clobbered_regs[BASE_REGNUM] ? true : false);
7784 df_set_regs_ever_live (RETURN_REGNUM,
7785 clobbered_regs[RETURN_REGNUM] ? true : false);
7786 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7787 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7789 if (cfun->machine->base_reg)
7790 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7793 /* Return true if it is legal to put a value with MODE into REGNO. */
7795 bool
7796 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7798 switch (REGNO_REG_CLASS (regno))
7800 case FP_REGS:
7801 if (REGNO_PAIR_OK (regno, mode))
7803 if (mode == SImode || mode == DImode)
7804 return true;
7806 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7807 return true;
7809 break;
7810 case ADDR_REGS:
7811 if (FRAME_REGNO_P (regno) && mode == Pmode)
7812 return true;
7814 /* fallthrough */
7815 case GENERAL_REGS:
7816 if (REGNO_PAIR_OK (regno, mode))
7818 if (TARGET_ZARCH
7819 || (mode != TFmode && mode != TCmode && mode != TDmode))
7820 return true;
7822 break;
7823 case CC_REGS:
7824 if (GET_MODE_CLASS (mode) == MODE_CC)
7825 return true;
7826 break;
7827 case ACCESS_REGS:
7828 if (REGNO_PAIR_OK (regno, mode))
7830 if (mode == SImode || mode == Pmode)
7831 return true;
7833 break;
7834 default:
7835 return false;
7838 return false;
7841 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7843 bool
7844 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7846 /* Once we've decided upon a register to use as base register, it must
7847 no longer be used for any other purpose. */
7848 if (cfun->machine->base_reg)
7849 if (REGNO (cfun->machine->base_reg) == old_reg
7850 || REGNO (cfun->machine->base_reg) == new_reg)
7851 return false;
7853 return true;
7856 /* Return the maximum number of registers of class RCLASS needed to
7857 hold a value of mode MODE. */
7860 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7862 switch (rclass)
7864 case FP_REGS:
7865 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7866 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7867 else
7868 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7869 case ACCESS_REGS:
7870 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7871 default:
7872 break;
7874 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
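/* For example (64 bit): a TFmode value (16 bytes) needs (16 + 7) / 8 = 2
   FP_REGS, a DCmode complex double needs 2 * ((16/2 + 7) / 8) = 2, and a
   TImode value in GENERAL_REGS needs (16 + 7) / 8 = 2 registers with an
   8-byte UNITS_PER_WORD.  */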
7877 /* Return true if we use LRA instead of the reload pass. */
7878 static bool
7879 s390_lra_p (void)
7881 return s390_lra_flag;
7884 /* Return true if register FROM can be eliminated via register TO. */
7886 static bool
7887 s390_can_eliminate (const int from, const int to)
7889 /* On zSeries machines, we have not marked the base register as fixed.
7890 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7891 If a function requires the base register, we say here that this
7892 elimination cannot be performed. This will cause reload to free
7893 up the base register (as if it were fixed). On the other hand,
7894 if the current function does *not* require the base register, we
7895 say here the elimination succeeds, which in turn allows reload
7896 to allocate the base register for any other purpose. */
7897 if (from == BASE_REGNUM && to == BASE_REGNUM)
7899 if (TARGET_CPU_ZARCH)
7901 s390_init_frame_layout ();
7902 return cfun->machine->base_reg == NULL_RTX;
7905 return false;
7908 /* Everything else must point into the stack frame. */
7909 gcc_assert (to == STACK_POINTER_REGNUM
7910 || to == HARD_FRAME_POINTER_REGNUM);
7912 gcc_assert (from == FRAME_POINTER_REGNUM
7913 || from == ARG_POINTER_REGNUM
7914 || from == RETURN_ADDRESS_POINTER_REGNUM);
7916 /* Make sure we actually saved the return address. */
7917 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7918 if (!crtl->calls_eh_return
7919 && !cfun->stdarg
7920 && !cfun_frame_layout.save_return_addr_p)
7921 return false;
7923 return true;
7926 /* Return the offset between register FROM and register TO initially after the prologue. */
7928 HOST_WIDE_INT
7929 s390_initial_elimination_offset (int from, int to)
7931 HOST_WIDE_INT offset;
7932 int index;
7934 /* ??? Why are we called for non-eliminable pairs? */
7935 if (!s390_can_eliminate (from, to))
7936 return 0;
7938 switch (from)
7940 case FRAME_POINTER_REGNUM:
7941 offset = (get_frame_size()
7942 + STACK_POINTER_OFFSET
7943 + crtl->outgoing_args_size);
7944 break;
7946 case ARG_POINTER_REGNUM:
7947 s390_init_frame_layout ();
7948 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7949 break;
7951 case RETURN_ADDRESS_POINTER_REGNUM:
7952 s390_init_frame_layout ();
7953 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7954 gcc_assert (index >= 0);
7955 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7956 offset += index * UNITS_PER_LONG;
7957 break;
7959 case BASE_REGNUM:
7960 offset = 0;
7961 break;
7963 default:
7964 gcc_unreachable ();
7967 return offset;
7970 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7971 to register BASE. Return generated insn. */
7973 static rtx
7974 save_fpr (rtx base, int offset, int regnum)
7976 rtx addr;
7977 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7979 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7980 set_mem_alias_set (addr, get_varargs_alias_set ());
7981 else
7982 set_mem_alias_set (addr, get_frame_alias_set ());
7984 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7987 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7988 to register BASE. Return generated insn. */
7990 static rtx
7991 restore_fpr (rtx base, int offset, int regnum)
7993 rtx addr;
7994 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7995 set_mem_alias_set (addr, get_frame_alias_set ());
7997 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
8000 /* Return true if REGNO is a global register, but not one
8001 of the special ones that need to be saved/restored anyway. */
8003 static inline bool
8004 global_not_special_regno_p (int regno)
8006 return (global_regs[regno]
8007 /* These registers are special and need to be
8008 restored in any case. */
8009 && !(regno == STACK_POINTER_REGNUM
8010 || regno == RETURN_REGNUM
8011 || regno == BASE_REGNUM
8012 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
8015 /* Generate insn to save registers FIRST to LAST into
8016 the register save area located at offset OFFSET
8017 relative to register BASE. */
8019 static rtx
8020 save_gprs (rtx base, int offset, int first, int last)
8022 rtx addr, insn, note;
8023 int i;
8025 addr = plus_constant (Pmode, base, offset);
8026 addr = gen_rtx_MEM (Pmode, addr);
8028 set_mem_alias_set (addr, get_frame_alias_set ());
8030 /* Special-case single register. */
8031 if (first == last)
8033 if (TARGET_64BIT)
8034 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8035 else
8036 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8038 if (!global_not_special_regno_p (first))
8039 RTX_FRAME_RELATED_P (insn) = 1;
8040 return insn;
8044 insn = gen_store_multiple (addr,
8045 gen_rtx_REG (Pmode, first),
8046 GEN_INT (last - first + 1));
8048 if (first <= 6 && cfun->stdarg)
8049 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8051 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
8053 if (first + i <= 6)
8054 set_mem_alias_set (mem, get_varargs_alias_set ());
8057 /* We need to set the FRAME_RELATED flag on all SETs
8058 inside the store-multiple pattern.
8060 However, we must not emit DWARF records for registers 2..5
8061 if they are stored for use by variable arguments ...
8063 ??? Unfortunately, it is not enough to simply not set the
8064 FRAME_RELATED flags for those SETs, because the first SET
8065 of the PARALLEL is always treated as if it had the flag
8066 set, even if it does not. Therefore we emit a new pattern
8067 without those registers as a REG_FRAME_RELATED_EXPR note. */
8069 if (first >= 6 && !global_not_special_regno_p (first))
8071 rtx pat = PATTERN (insn);
8073 for (i = 0; i < XVECLEN (pat, 0); i++)
8074 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8075 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8076 0, i)))))
8077 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8079 RTX_FRAME_RELATED_P (insn) = 1;
8081 else if (last >= 6)
8083 int start;
8085 for (start = first >= 6 ? first : 6; start <= last; start++)
8086 if (!global_not_special_regno_p (start))
8087 break;
8089 if (start > last)
8090 return insn;
8092 addr = plus_constant (Pmode, base,
8093 offset + (start - first) * UNITS_PER_LONG);
8094 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8095 gen_rtx_REG (Pmode, start),
8096 GEN_INT (last - start + 1));
8097 note = PATTERN (note);
8099 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8101 for (i = 0; i < XVECLEN (note, 0); i++)
8102 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8103 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8104 0, i)))))
8105 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8107 RTX_FRAME_RELATED_P (insn) = 1;
8110 return insn;
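/* For illustration: in a stdarg function saving r2..r15, the store
   multiple emitted above covers all of r2..r15, but the
   REG_FRAME_RELATED_EXPR note built in the else-if branch describes only
   r6..r15, so no DWARF save records are emitted for the argument
   registers r2..r5.  */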
8113 /* Generate insn to restore registers FIRST to LAST from
8114 the register save area located at offset OFFSET
8115 relative to register BASE. */
8117 static rtx
8118 restore_gprs (rtx base, int offset, int first, int last)
8120 rtx addr, insn;
8122 addr = plus_constant (Pmode, base, offset);
8123 addr = gen_rtx_MEM (Pmode, addr);
8124 set_mem_alias_set (addr, get_frame_alias_set ());
8126 /* Special-case single register. */
8127 if (first == last)
8129 if (TARGET_64BIT)
8130 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8131 else
8132 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8134 return insn;
8137 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8138 addr,
8139 GEN_INT (last - first + 1));
8140 return insn;
8143 /* Return insn sequence to load the GOT register. */
8145 static GTY(()) rtx got_symbol;
8147 s390_load_got (void)
8149 rtx insns;
8151 /* We cannot use pic_offset_table_rtx here, since this function is
8152 also used for non-PIC code when __tls_get_offset is called; in
8153 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
8154 is usable. */
8155 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8157 if (!got_symbol)
8159 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8160 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8163 start_sequence ();
8165 if (TARGET_CPU_ZARCH)
8167 emit_move_insn (got_rtx, got_symbol);
8169 else
8171 rtx offset;
8173 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8174 UNSPEC_LTREL_OFFSET);
8175 offset = gen_rtx_CONST (Pmode, offset);
8176 offset = force_const_mem (Pmode, offset);
8178 emit_move_insn (got_rtx, offset);
8180 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8181 UNSPEC_LTREL_BASE);
8182 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8184 emit_move_insn (got_rtx, offset);
8187 insns = get_insns ();
8188 end_sequence ();
8189 return insns;
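/* On 31 bit (the else branch above) the GOT pointer is computed in two
   steps: r12 is first loaded with the literal-pool entry holding the
   UNSPEC_LTREL_OFFSET of _GLOBAL_OFFSET_TABLE_, and the literal pool base
   address (UNSPEC_LTREL_BASE, later replaced by the base register) is
   then added, leaving the absolute GOT address in r12.  */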
8192 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8193 and the change to the stack pointer. */
8195 static void
8196 s390_emit_stack_tie (void)
8198 rtx mem = gen_frame_mem (BLKmode,
8199 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8201 emit_insn (gen_stack_tie (mem));
8204 /* Expand the prologue into a bunch of separate insns. */
8206 void
8207 s390_emit_prologue (void)
8209 rtx insn, addr;
8210 rtx temp_reg;
8211 int i;
8212 int offset;
8213 int next_fpr = 0;
8215 /* Complete frame layout. */
8217 s390_update_frame_layout ();
8219 /* Annotate all constant pool references to let the scheduler know
8220 they implicitly use the base register. */
8222 push_topmost_sequence ();
8224 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8225 if (INSN_P (insn))
8227 annotate_constant_pool_refs (&PATTERN (insn));
8228 df_insn_rescan (insn);
8231 pop_topmost_sequence ();
8233 /* Choose the best register to use as a temporary within the prologue.
8234 See below for why TPF must use register 1. */
8236 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8237 && !crtl->is_leaf
8238 && !TARGET_TPF_PROFILING)
8239 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8240 else
8241 temp_reg = gen_rtx_REG (Pmode, 1);
8243 /* Save call saved gprs. */
8244 if (cfun_frame_layout.first_save_gpr != -1)
8246 insn = save_gprs (stack_pointer_rtx,
8247 cfun_frame_layout.gprs_offset +
8248 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8249 - cfun_frame_layout.first_save_gpr_slot),
8250 cfun_frame_layout.first_save_gpr,
8251 cfun_frame_layout.last_save_gpr);
8252 emit_insn (insn);
8255 /* Dummy insn to mark literal pool slot. */
8257 if (cfun->machine->base_reg)
8258 emit_insn (gen_main_pool (cfun->machine->base_reg));
8260 offset = cfun_frame_layout.f0_offset;
8262 /* Save f0 and f2. */
8263 for (i = 0; i < 2; i++)
8265 if (cfun_fpr_bit_p (i))
8267 save_fpr (stack_pointer_rtx, offset, i + 16);
8268 offset += 8;
8270 else if (!TARGET_PACKED_STACK)
8271 offset += 8;
8274 /* Save f4 and f6. */
8275 offset = cfun_frame_layout.f4_offset;
8276 for (i = 2; i < 4; i++)
8278 if (cfun_fpr_bit_p (i))
8280 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8281 offset += 8;
8283 /* If f4 and f6 are call clobbered they are saved due to stdargs and
8284 therefore are not frame related. */
8285 if (!call_really_used_regs[i + 16])
8286 RTX_FRAME_RELATED_P (insn) = 1;
8288 else if (!TARGET_PACKED_STACK)
8289 offset += 8;
8292 if (TARGET_PACKED_STACK
8293 && cfun_save_high_fprs_p
8294 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8296 offset = (cfun_frame_layout.f8_offset
8297 + (cfun_frame_layout.high_fprs - 1) * 8);
8299 for (i = 15; i > 7 && offset >= 0; i--)
8300 if (cfun_fpr_bit_p (i))
8302 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8304 RTX_FRAME_RELATED_P (insn) = 1;
8305 offset -= 8;
8307 if (offset >= cfun_frame_layout.f8_offset)
8308 next_fpr = i + 16;
8311 if (!TARGET_PACKED_STACK)
8312 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8314 if (flag_stack_usage_info)
8315 current_function_static_stack_size = cfun_frame_layout.frame_size;
8317 /* Decrement stack pointer. */
8319 if (cfun_frame_layout.frame_size > 0)
8321 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8322 rtx real_frame_off;
8324 if (s390_stack_size)
8326 HOST_WIDE_INT stack_guard;
8328 if (s390_stack_guard)
8329 stack_guard = s390_stack_guard;
8330 else
8332 /* If no value for the stack guard is provided, the smallest power of 2
8333 that is not smaller than the current frame size is chosen. */
8334 stack_guard = 1;
8335 while (stack_guard < cfun_frame_layout.frame_size)
8336 stack_guard <<= 1;
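/* E.g. a frame size of 5000 bytes yields a stack_guard of 8192,
   the next power of two.  */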
8339 if (cfun_frame_layout.frame_size >= s390_stack_size)
8341 warning (0, "frame size of function %qs is %wd"
8342 " bytes exceeding user provided stack limit of "
8343 "%d bytes. "
8344 "An unconditional trap is added.",
8345 current_function_name(), cfun_frame_layout.frame_size,
8346 s390_stack_size);
8347 emit_insn (gen_trap ());
8349 else
8351 /* stack_guard has to be smaller than s390_stack_size.
8352 Otherwise we would emit an AND with zero which would
8353 not match the test under mask pattern. */
8354 if (stack_guard >= s390_stack_size)
8356 warning (0, "frame size of function %qs is %wd"
8357 " bytes which is more than half the stack size. "
8358 "The dynamic check would not be reliable. "
8359 "No check emitted for this function.",
8360 current_function_name(),
8361 cfun_frame_layout.frame_size);
8363 else
8365 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8366 & ~(stack_guard - 1));
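/* The mask covers the address bits between stack_guard and
   s390_stack_size.  For instance, -mstack-size=65536 together with
   -mstack-guard=4096 gives a mask of 0xf000; the conditional trap below
   fires once all of these stack pointer bits are zero, i.e. once fewer
   than stack_guard bytes of the assumed stack area remain.  */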
8368 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8369 GEN_INT (stack_check_mask));
8370 if (TARGET_64BIT)
8371 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8372 t, const0_rtx),
8373 t, const0_rtx, const0_rtx));
8374 else
8375 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8376 t, const0_rtx),
8377 t, const0_rtx, const0_rtx));
8382 if (s390_warn_framesize > 0
8383 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8384 warning (0, "frame size of %qs is %wd bytes",
8385 current_function_name (), cfun_frame_layout.frame_size);
8387 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8388 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8390 /* Save incoming stack pointer into temp reg. */
8391 if (TARGET_BACKCHAIN || next_fpr)
8392 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8394 /* Subtract frame size from stack pointer. */
8396 if (DISP_IN_RANGE (INTVAL (frame_off)))
8398 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8399 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8400 frame_off));
8401 insn = emit_insn (insn);
8403 else
8405 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8406 frame_off = force_const_mem (Pmode, frame_off);
8408 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8409 annotate_constant_pool_refs (&PATTERN (insn));
8412 RTX_FRAME_RELATED_P (insn) = 1;
8413 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8414 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8415 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8416 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8417 real_frame_off)));
8419 /* Set backchain. */
8421 if (TARGET_BACKCHAIN)
8423 if (cfun_frame_layout.backchain_offset)
8424 addr = gen_rtx_MEM (Pmode,
8425 plus_constant (Pmode, stack_pointer_rtx,
8426 cfun_frame_layout.backchain_offset));
8427 else
8428 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8429 set_mem_alias_set (addr, get_frame_alias_set ());
8430 insn = emit_insn (gen_move_insn (addr, temp_reg));
8433 /* If we support non-call exceptions (e.g. for Java),
8434 we need to make sure the backchain pointer is set up
8435 before any possibly trapping memory access. */
8436 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8438 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8439 emit_clobber (addr);
8443 /* Save fprs 8 - 15 (64 bit ABI). */
8445 if (cfun_save_high_fprs_p && next_fpr)
8447 /* If the stack might be accessed through a different register
8448 we have to make sure that the stack pointer decrement is not
8449 moved below the use of the stack slots. */
8450 s390_emit_stack_tie ();
8452 insn = emit_insn (gen_add2_insn (temp_reg,
8453 GEN_INT (cfun_frame_layout.f8_offset)));
8455 offset = 0;
8457 for (i = 24; i <= next_fpr; i++)
8458 if (cfun_fpr_bit_p (i - 16))
8460 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8461 cfun_frame_layout.frame_size
8462 + cfun_frame_layout.f8_offset
8463 + offset);
8465 insn = save_fpr (temp_reg, offset, i);
8466 offset += 8;
8467 RTX_FRAME_RELATED_P (insn) = 1;
8468 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8469 gen_rtx_SET (VOIDmode,
8470 gen_rtx_MEM (DFmode, addr),
8471 gen_rtx_REG (DFmode, i)));
8475 /* Set frame pointer, if needed. */
8477 if (frame_pointer_needed)
8479 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8480 RTX_FRAME_RELATED_P (insn) = 1;
8483 /* Set up got pointer, if needed. */
8485 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8487 rtx insns = s390_load_got ();
8489 for (insn = insns; insn; insn = NEXT_INSN (insn))
8490 annotate_constant_pool_refs (&PATTERN (insn));
8492 emit_insn (insns);
8495 if (TARGET_TPF_PROFILING)
8497 /* Generate a BAS instruction to serve as a function
8498 entry intercept to facilitate the use of tracing
8499 algorithms located at the branch target. */
8500 emit_insn (gen_prologue_tpf ());
8502 /* Emit a blockage here so that all code
8503 lies between the profiling mechanisms. */
8504 emit_insn (gen_blockage ());
8508 /* Expand the epilogue into a bunch of separate insns. */
8510 void
8511 s390_emit_epilogue (bool sibcall)
8513 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8514 int area_bottom, area_top, offset = 0;
8515 int next_offset;
8516 rtvec p;
8517 int i;
8519 if (TARGET_TPF_PROFILING)
8522 /* Generate a BAS instruction to serve as a function
8523 exit intercept to facilitate the use of tracing
8524 algorithms located at the branch target. */
8526 /* Emit a blockage here so that all code
8527 lies between the profiling mechanisms. */
8528 emit_insn (gen_blockage ());
8530 emit_insn (gen_epilogue_tpf ());
8533 /* Check whether to use frame or stack pointer for restore. */
8535 frame_pointer = (frame_pointer_needed
8536 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8538 s390_frame_area (&area_bottom, &area_top);
8540 /* Check whether we can access the register save area.
8541 If not, increment the frame pointer as required. */
8543 if (area_top <= area_bottom)
8545 /* Nothing to restore. */
8547 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8548 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8550 /* Area is in range. */
8551 offset = cfun_frame_layout.frame_size;
8553 else
8555 rtx insn, frame_off, cfa;
8557 offset = area_bottom < 0 ? -area_bottom : 0;
8558 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8560 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8561 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8562 if (DISP_IN_RANGE (INTVAL (frame_off)))
8564 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8565 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8566 insn = emit_insn (insn);
8568 else
8570 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8571 frame_off = force_const_mem (Pmode, frame_off);
8573 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8574 annotate_constant_pool_refs (&PATTERN (insn));
8576 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8577 RTX_FRAME_RELATED_P (insn) = 1;
8580 /* Restore call saved fprs. */
8582 if (TARGET_64BIT)
8584 if (cfun_save_high_fprs_p)
8586 next_offset = cfun_frame_layout.f8_offset;
8587 for (i = 24; i < 32; i++)
8589 if (cfun_fpr_bit_p (i - 16))
8591 restore_fpr (frame_pointer,
8592 offset + next_offset, i);
8593 cfa_restores
8594 = alloc_reg_note (REG_CFA_RESTORE,
8595 gen_rtx_REG (DFmode, i), cfa_restores);
8596 next_offset += 8;
8602 else
8604 next_offset = cfun_frame_layout.f4_offset;
8605 for (i = 18; i < 20; i++)
8607 if (cfun_fpr_bit_p (i - 16))
8609 restore_fpr (frame_pointer,
8610 offset + next_offset, i);
8611 cfa_restores
8612 = alloc_reg_note (REG_CFA_RESTORE,
8613 gen_rtx_REG (DFmode, i), cfa_restores);
8614 next_offset += 8;
8616 else if (!TARGET_PACKED_STACK)
8617 next_offset += 8;
8622 /* Return register. */
8624 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8626 /* Restore call saved gprs. */
8628 if (cfun_frame_layout.first_restore_gpr != -1)
8630 rtx insn, addr;
8631 int i;
8633 /* Check for global registers and store their current values into the
8634 stack slots they would be restored from, so the restore below keeps them intact. */
8636 for (i = cfun_frame_layout.first_restore_gpr;
8637 i <= cfun_frame_layout.last_restore_gpr;
8638 i++)
8640 if (global_not_special_regno_p (i))
8642 addr = plus_constant (Pmode, frame_pointer,
8643 offset + cfun_frame_layout.gprs_offset
8644 + (i - cfun_frame_layout.first_save_gpr_slot)
8645 * UNITS_PER_LONG);
8646 addr = gen_rtx_MEM (Pmode, addr);
8647 set_mem_alias_set (addr, get_frame_alias_set ());
8648 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8650 else
8651 cfa_restores
8652 = alloc_reg_note (REG_CFA_RESTORE,
8653 gen_rtx_REG (Pmode, i), cfa_restores);
8656 if (! sibcall)
8658 /* Fetch the return address from the stack before the load multiple;
8659 this helps scheduling. */
8661 if (cfun_frame_layout.save_return_addr_p
8662 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8663 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8665 int return_regnum = find_unused_clobbered_reg();
8666 if (!return_regnum)
8667 return_regnum = 4;
8668 return_reg = gen_rtx_REG (Pmode, return_regnum);
8670 addr = plus_constant (Pmode, frame_pointer,
8671 offset + cfun_frame_layout.gprs_offset
8672 + (RETURN_REGNUM
8673 - cfun_frame_layout.first_save_gpr_slot)
8674 * UNITS_PER_LONG);
8675 addr = gen_rtx_MEM (Pmode, addr);
8676 set_mem_alias_set (addr, get_frame_alias_set ());
8677 emit_move_insn (return_reg, addr);
8681 insn = restore_gprs (frame_pointer,
8682 offset + cfun_frame_layout.gprs_offset
8683 + (cfun_frame_layout.first_restore_gpr
8684 - cfun_frame_layout.first_save_gpr_slot)
8685 * UNITS_PER_LONG,
8686 cfun_frame_layout.first_restore_gpr,
8687 cfun_frame_layout.last_restore_gpr);
8688 insn = emit_insn (insn);
8689 REG_NOTES (insn) = cfa_restores;
8690 add_reg_note (insn, REG_CFA_DEF_CFA,
8691 plus_constant (Pmode, stack_pointer_rtx,
8692 STACK_POINTER_OFFSET));
8693 RTX_FRAME_RELATED_P (insn) = 1;
8696 if (! sibcall)
8699 /* Return to caller. */
8701 p = rtvec_alloc (2);
8703 RTVEC_ELT (p, 0) = ret_rtx;
8704 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8705 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8710 /* Return the size in bytes of a function argument of
8711 type TYPE and/or mode MODE. At least one of TYPE or
8712 MODE must be specified. */
8714 static int
8715 s390_function_arg_size (enum machine_mode mode, const_tree type)
8717 if (type)
8718 return int_size_in_bytes (type);
8720 /* No type info available for some library calls ... */
8721 if (mode != BLKmode)
8722 return GET_MODE_SIZE (mode);
8724 /* If we have neither type nor mode, abort. */
8725 gcc_unreachable ();
8728 /* Return true if a function argument of type TYPE and mode MODE
8729 is to be passed in a floating-point register, if available. */
8731 static bool
8732 s390_function_arg_float (enum machine_mode mode, const_tree type)
8734 int size = s390_function_arg_size (mode, type);
8735 if (size > 8)
8736 return false;
8738 /* Soft-float changes the ABI: no floating-point registers are used. */
8739 if (TARGET_SOFT_FLOAT)
8740 return false;
8742 /* No type info available for some library calls ... */
8743 if (!type)
8744 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8746 /* The ABI says that record types with a single member are treated
8747 just like that member would be. */
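/* E.g. struct { double d; } is passed in a floating-point register just
   like a plain double; the loop below peels off such single-member
   wrappers.  */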
8748 while (TREE_CODE (type) == RECORD_TYPE)
8750 tree field, single = NULL_TREE;
8752 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8754 if (TREE_CODE (field) != FIELD_DECL)
8755 continue;
8757 if (single == NULL_TREE)
8758 single = TREE_TYPE (field);
8759 else
8760 return false;
8763 if (single == NULL_TREE)
8764 return false;
8765 else
8766 type = single;
8769 return TREE_CODE (type) == REAL_TYPE;
8772 /* Return true if a function argument of type TYPE and mode MODE
8773 is to be passed in an integer register, or a pair of integer
8774 registers, if available. */
8776 static bool
8777 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8779 int size = s390_function_arg_size (mode, type);
8780 if (size > 8)
8781 return false;
8783 /* No type info available for some library calls ... */
8784 if (!type)
8785 return GET_MODE_CLASS (mode) == MODE_INT
8786 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8788 /* We accept small integral (and similar) types. */
8789 if (INTEGRAL_TYPE_P (type)
8790 || POINTER_TYPE_P (type)
8791 || TREE_CODE (type) == NULLPTR_TYPE
8792 || TREE_CODE (type) == OFFSET_TYPE
8793 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8794 return true;
8796 /* We also accept structs of size 1, 2, 4, 8 that are not
8797 passed in floating-point registers. */
8798 if (AGGREGATE_TYPE_P (type)
8799 && exact_log2 (size) >= 0
8800 && !s390_function_arg_float (mode, type))
8801 return true;
8803 return false;
8806 /* Return 1 if a function argument of type TYPE and mode MODE
8807 is to be passed by reference. The ABI specifies that only
8808 structures of size 1, 2, 4, or 8 bytes are passed by value,
8809 all other structures (and complex numbers) are passed by
8810 reference. */
8812 static bool
8813 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8814 enum machine_mode mode, const_tree type,
8815 bool named ATTRIBUTE_UNUSED)
8817 int size = s390_function_arg_size (mode, type);
8818 if (size > 8)
8819 return true;
8821 if (type)
8823 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8824 return 1;
8826 if (TREE_CODE (type) == COMPLEX_TYPE
8827 || TREE_CODE (type) == VECTOR_TYPE)
8828 return 1;
8831 return 0;
8834 /* Update the data in CUM to advance over an argument of mode MODE and
8835 data type TYPE. (TYPE is null for libcalls where that information
8836 may not be available.). The boolean NAMED specifies whether the
8837 argument is a named argument (as opposed to an unnamed argument
8838 matching an ellipsis). */
8840 static void
8841 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8842 const_tree type, bool named ATTRIBUTE_UNUSED)
8844 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8846 if (s390_function_arg_float (mode, type))
8848 cum->fprs += 1;
8850 else if (s390_function_arg_integer (mode, type))
8852 int size = s390_function_arg_size (mode, type);
8853 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8855 else
8856 gcc_unreachable ();
8859 /* Define where to put the arguments to a function.
8860 Value is zero to push the argument on the stack,
8861 or a hard register in which to store the argument.
8863 MODE is the argument's machine mode.
8864 TYPE is the data type of the argument (as a tree).
8865 This is null for libcalls where that information may
8866 not be available.
8867 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8868 the preceding args and about the function being called.
8869 NAMED is nonzero if this argument is a named parameter
8870 (otherwise it is an extra parameter matching an ellipsis).
8872 On S/390, we use general purpose registers 2 through 6 to
8873 pass integer, pointer, and certain structure arguments, and
8874 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8875 to pass floating point arguments. All remaining arguments
8876 are pushed to the stack. */
8878 static rtx
8879 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8880 const_tree type, bool named ATTRIBUTE_UNUSED)
8882 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8884 if (s390_function_arg_float (mode, type))
8886 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8887 return 0;
8888 else
8889 return gen_rtx_REG (mode, cum->fprs + 16);
8891 else if (s390_function_arg_integer (mode, type))
8893 int size = s390_function_arg_size (mode, type);
8894 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8896 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8897 return 0;
8898 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8899 return gen_rtx_REG (mode, cum->gprs + 2);
8900 else if (n_gprs == 2)
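/* An 8-byte argument with 4-byte GPRs (31-bit mode): describe the pair
   of consecutive registers with a PARALLEL holding the two halves at
   byte offsets 0 and 4.  */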
8902 rtvec p = rtvec_alloc (2);
8904 RTVEC_ELT (p, 0)
8905 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8906 const0_rtx);
8907 RTVEC_ELT (p, 1)
8908 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8909 GEN_INT (4));
8911 return gen_rtx_PARALLEL (mode, p);
8915 /* After the real arguments, expand_call calls us once again
8916 with a void_type_node type. Whatever we return here is
8917 passed as operand 2 to the call expanders.
8919 We don't need this feature ... */
8920 else if (type == void_type_node)
8921 return const0_rtx;
8923 gcc_unreachable ();
8926 /* Return true if return values of type TYPE should be returned
8927 in a memory buffer whose address is passed by the caller as
8928 hidden first argument. */
8930 static bool
8931 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8933 /* We accept small integral (and similar) types. */
8934 if (INTEGRAL_TYPE_P (type)
8935 || POINTER_TYPE_P (type)
8936 || TREE_CODE (type) == OFFSET_TYPE
8937 || TREE_CODE (type) == REAL_TYPE)
8938 return int_size_in_bytes (type) > 8;
8940 /* Aggregates and similar constructs are always returned
8941 in memory. */
8942 if (AGGREGATE_TYPE_P (type)
8943 || TREE_CODE (type) == COMPLEX_TYPE
8944 || TREE_CODE (type) == VECTOR_TYPE)
8945 return true;
8947 /* ??? We get called on all sorts of random stuff from
8948 aggregate_value_p. We can't abort, but it's not clear
8949 what's safe to return. Pretend it's a struct I guess. */
8950 return true;
8953 /* Function arguments and return values are promoted to word size. */
8955 static enum machine_mode
8956 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8957 int *punsignedp,
8958 const_tree fntype ATTRIBUTE_UNUSED,
8959 int for_return ATTRIBUTE_UNUSED)
8961 if (INTEGRAL_MODE_P (mode)
8962 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8964 if (type != NULL_TREE && POINTER_TYPE_P (type))
8965 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8966 return Pmode;
8969 return mode;
8972 /* Define where to return a (scalar) value of type RET_TYPE.
8973 If RET_TYPE is null, define where to return a (scalar)
8974 value of mode MODE from a libcall. */
8976 static rtx
8977 s390_function_and_libcall_value (enum machine_mode mode,
8978 const_tree ret_type,
8979 const_tree fntype_or_decl,
8980 bool outgoing ATTRIBUTE_UNUSED)
8982 /* For normal functions perform the promotion as
8983 promote_function_mode would do. */
8984 if (ret_type)
8986 int unsignedp = TYPE_UNSIGNED (ret_type);
8987 mode = promote_function_mode (ret_type, mode, &unsignedp,
8988 fntype_or_decl, 1);
8991 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8992 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8994 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8995 return gen_rtx_REG (mode, 16);
8996 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8997 || UNITS_PER_LONG == UNITS_PER_WORD)
8998 return gen_rtx_REG (mode, 2);
8999 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
9001 /* This case is triggered when returning a 64 bit value with
9002 -m31 -mzarch. Although the value would fit into a single
9003 register it has to be forced into a 32 bit register pair in
9004 order to match the ABI. */
9005 rtvec p = rtvec_alloc (2);
9007 RTVEC_ELT (p, 0)
9008 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
9009 RTVEC_ELT (p, 1)
9010 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
9012 return gen_rtx_PARALLEL (mode, p);
9015 gcc_unreachable ();
9018 /* Define where to return a scalar return value of type RET_TYPE. */
9020 static rtx
9021 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9022 bool outgoing)
9024 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9025 fn_decl_or_type, outgoing);
9028 /* Define where to return a scalar libcall return value of mode
9029 MODE. */
9031 static rtx
9032 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9034 return s390_function_and_libcall_value (mode, NULL_TREE,
9035 NULL_TREE, true);
9039 /* Create and return the va_list datatype.
9041 On S/390, va_list is an array type equivalent to
9043 typedef struct __va_list_tag
9045 long __gpr;
9046 long __fpr;
9047 void *__overflow_arg_area;
9048 void *__reg_save_area;
9049 } va_list[1];
9051 where __gpr and __fpr hold the number of general purpose
9052 or floating point arguments used up to now, respectively,
9053 __overflow_arg_area points to the stack location of the
9054 next argument passed on the stack, and __reg_save_area
9055 always points to the start of the register area in the
9056 call frame of the current function. The function prologue
9057 saves all registers used for argument passing into this
9058 area if the function uses variable arguments. */
9060 static tree
9061 s390_build_builtin_va_list (void)
9063 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9065 record = lang_hooks.types.make_type (RECORD_TYPE);
9067 type_decl =
9068 build_decl (BUILTINS_LOCATION,
9069 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9071 f_gpr = build_decl (BUILTINS_LOCATION,
9072 FIELD_DECL, get_identifier ("__gpr"),
9073 long_integer_type_node);
9074 f_fpr = build_decl (BUILTINS_LOCATION,
9075 FIELD_DECL, get_identifier ("__fpr"),
9076 long_integer_type_node);
9077 f_ovf = build_decl (BUILTINS_LOCATION,
9078 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9079 ptr_type_node);
9080 f_sav = build_decl (BUILTINS_LOCATION,
9081 FIELD_DECL, get_identifier ("__reg_save_area"),
9082 ptr_type_node);
9084 va_list_gpr_counter_field = f_gpr;
9085 va_list_fpr_counter_field = f_fpr;
9087 DECL_FIELD_CONTEXT (f_gpr) = record;
9088 DECL_FIELD_CONTEXT (f_fpr) = record;
9089 DECL_FIELD_CONTEXT (f_ovf) = record;
9090 DECL_FIELD_CONTEXT (f_sav) = record;
9092 TYPE_STUB_DECL (record) = type_decl;
9093 TYPE_NAME (record) = type_decl;
9094 TYPE_FIELDS (record) = f_gpr;
9095 DECL_CHAIN (f_gpr) = f_fpr;
9096 DECL_CHAIN (f_fpr) = f_ovf;
9097 DECL_CHAIN (f_ovf) = f_sav;
9099 layout_type (record);
9101 /* The correct type is an array type of one element. */
9102 return build_array_type (record, build_index_type (size_zero_node));
9105 /* Implement va_start by filling the va_list structure VALIST.
9106 STDARG_P is always true, and ignored.
9107 NEXTARG points to the first anonymous stack argument.
9109 The following global variables are used to initialize
9110 the va_list structure:
9112 crtl->args.info:
9113 holds number of gprs and fprs used for named arguments.
9114 crtl->args.arg_offset_rtx:
9115 holds the offset of the first anonymous stack argument
9116 (relative to the virtual arg pointer). */
9118 static void
9119 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9121 HOST_WIDE_INT n_gpr, n_fpr;
9122 int off;
9123 tree f_gpr, f_fpr, f_ovf, f_sav;
9124 tree gpr, fpr, ovf, sav, t;
9126 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9127 f_fpr = DECL_CHAIN (f_gpr);
9128 f_ovf = DECL_CHAIN (f_fpr);
9129 f_sav = DECL_CHAIN (f_ovf);
9131 valist = build_simple_mem_ref (valist);
9132 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9133 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9134 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9135 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9137 /* Count number of gp and fp argument registers used. */
9139 n_gpr = crtl->args.info.gprs;
9140 n_fpr = crtl->args.info.fprs;
9142 if (cfun->va_list_gpr_size)
9144 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9145 build_int_cst (NULL_TREE, n_gpr));
9146 TREE_SIDE_EFFECTS (t) = 1;
9147 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9150 if (cfun->va_list_fpr_size)
9152 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9153 build_int_cst (NULL_TREE, n_fpr));
9154 TREE_SIDE_EFFECTS (t) = 1;
9155 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9158 /* Find the overflow area. */
9159 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9160 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9162 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9164 off = INTVAL (crtl->args.arg_offset_rtx);
9165 off = off < 0 ? 0 : off;
9166 if (TARGET_DEBUG_ARG)
9167 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9168 (int)n_gpr, (int)n_fpr, off);
9170 t = fold_build_pointer_plus_hwi (t, off);
9172 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9173 TREE_SIDE_EFFECTS (t) = 1;
9174 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9177 /* Find the register save area. */
9178 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9179 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
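/* Point __reg_save_area at the (hypothetical) save slot of r0, i.e.
   RETURN_REGNUM slots below the return address slot, so that the
   per-register offsets applied in s390_gimplify_va_arg locate the
   individual argument registers relative to it.  */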
9181 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9182 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9184 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9185 TREE_SIDE_EFFECTS (t) = 1;
9186 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9190 /* Implement va_arg by updating the va_list structure
9191 VALIST as required to retrieve an argument of type
9192 TYPE, and returning that argument.
9194 Generates code equivalent to:
9196 if (integral value) {
9197 if (size <= 4 && args.gpr < 5 ||
9198 size > 4 && args.gpr < 4 )
9199 ret = args.reg_save_area[args.gpr+8]
9200 else
9201 ret = *args.overflow_arg_area++;
9202 } else if (float value) {
9203 if (args.fpr < 2)
9204 ret = args.reg_save_area[args.fpr+64]
9205 else
9206 ret = *args.overflow_arg_area++;
9207 } else if (aggregate value) {
9208 if (args.gpr < 5)
9209 ret = *args.reg_save_area[args.gpr]
9210 else
9211 ret = **args.overflow_arg_area++;
9212 } */
9214 static tree
9215 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9216 gimple_seq *post_p ATTRIBUTE_UNUSED)
9218 tree f_gpr, f_fpr, f_ovf, f_sav;
9219 tree gpr, fpr, ovf, sav, reg, t, u;
9220 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9221 tree lab_false, lab_over, addr;
9223 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9224 f_fpr = DECL_CHAIN (f_gpr);
9225 f_ovf = DECL_CHAIN (f_fpr);
9226 f_sav = DECL_CHAIN (f_ovf);
9228 valist = build_va_arg_indirect_ref (valist);
9229 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9230 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9231 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9233 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9234 both appear on a lhs. */
9235 valist = unshare_expr (valist);
9236 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9238 size = int_size_in_bytes (type);
9240 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9242 if (TARGET_DEBUG_ARG)
9244 fprintf (stderr, "va_arg: aggregate type");
9245 debug_tree (type);
9248 /* Aggregates are passed by reference. */
9249 indirect_p = 1;
9250 reg = gpr;
9251 n_reg = 1;
9253 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9254 will be added by s390_frame_info because for va_args an even number
9255 of GPRs always has to be saved (r15-r2 = 14 regs). */
9256 sav_ofs = 2 * UNITS_PER_LONG;
9257 sav_scale = UNITS_PER_LONG;
9258 size = UNITS_PER_LONG;
9259 max_reg = GP_ARG_NUM_REG - n_reg;
9261 else if (s390_function_arg_float (TYPE_MODE (type), type))
9263 if (TARGET_DEBUG_ARG)
9265 fprintf (stderr, "va_arg: float type");
9266 debug_tree (type);
9269 /* FP args go in FP registers, if present. */
9270 indirect_p = 0;
9271 reg = fpr;
9272 n_reg = 1;
9273 sav_ofs = 16 * UNITS_PER_LONG;
9274 sav_scale = 8;
9275 max_reg = FP_ARG_NUM_REG - n_reg;
9277 else
9279 if (TARGET_DEBUG_ARG)
9281 fprintf (stderr, "va_arg: other type");
9282 debug_tree (type);
9285 /* Otherwise into GP registers. */
9286 indirect_p = 0;
9287 reg = gpr;
9288 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9290 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9291 will be added by s390_frame_info because for va_args an even number
9292 of GPRs always has to be saved (r15-r2 = 14 regs). */
9293 sav_ofs = 2 * UNITS_PER_LONG;
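/* On this big-endian target a value smaller than a word is passed in the
   least significant bytes of its slot, so skip the leading padding
   within the save slot.  */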
9295 if (size < UNITS_PER_LONG)
9296 sav_ofs += UNITS_PER_LONG - size;
9298 sav_scale = UNITS_PER_LONG;
9299 max_reg = GP_ARG_NUM_REG - n_reg;
9302 /* Pull the value out of the saved registers ... */
9304 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9305 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9306 addr = create_tmp_var (ptr_type_node, "addr");
9308 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9309 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9310 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9311 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9312 gimplify_and_add (t, pre_p);
9314 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9315 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9316 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9317 t = fold_build_pointer_plus (t, u);
9319 gimplify_assign (addr, t, pre_p);
9321 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9323 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9326 /* ... Otherwise out of the overflow area. */
9328 t = ovf;
9329 if (size < UNITS_PER_LONG)
9330 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9332 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9334 gimplify_assign (addr, t, pre_p);
9336 t = fold_build_pointer_plus_hwi (t, size);
9337 gimplify_assign (ovf, t, pre_p);
9339 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9342 /* Increment register save count. */
9344 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9345 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9346 gimplify_and_add (u, pre_p);
9348 if (indirect_p)
9350 t = build_pointer_type_for_mode (build_pointer_type (type),
9351 ptr_mode, true);
9352 addr = fold_convert (t, addr);
9353 addr = build_va_arg_indirect_ref (addr);
9355 else
9357 t = build_pointer_type_for_mode (type, ptr_mode, true);
9358 addr = fold_convert (t, addr);
9361 return build_va_arg_indirect_ref (addr);
9364 /* Output assembly code for the trampoline template to
9365 stdio stream FILE.
9367 On S/390, we use gpr 1 internally in the trampoline code;
9368 gpr 0 is used to hold the static chain. */
9370 static void
9371 s390_asm_trampoline_template (FILE *file)
9373 rtx op[2];
9374 op[0] = gen_rtx_REG (Pmode, 0);
9375 op[1] = gen_rtx_REG (Pmode, 1);
9377 if (TARGET_64BIT)
9379 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9380 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9381 output_asm_insn ("br\t%1", op); /* 2 byte */
9382 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9384 else
9386 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9387 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9388 output_asm_insn ("br\t%1", op); /* 2 byte */
9389 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9393 /* Emit RTL insns to initialize the variable parts of a trampoline.
9394 FNADDR is an RTX for the address of the function's pure code.
9395 CXT is an RTX for the static chain value for the function. */
9397 static void
9398 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9400 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9401 rtx mem;
9403 emit_block_move (m_tramp, assemble_trampoline_template (),
9404 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
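/* The template copied above occupies the first 2 * UNITS_PER_LONG bytes
   of the trampoline; the two words following it hold the static chain
   and the target address, which the lm/lmg in the template loads into
   gpr 0 and gpr 1 before branching to the target.  */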
9406 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9407 emit_move_insn (mem, cxt);
9408 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9409 emit_move_insn (mem, fnaddr);
9412 /* Output assembler code to FILE to increment profiler label # LABELNO
9413 for profiling a function entry. */
9415 void
9416 s390_function_profiler (FILE *file, int labelno)
9418 rtx op[7];
9420 char label[128];
9421 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9423 fprintf (file, "# function profiler \n");
9425 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9426 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9427 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9429 op[2] = gen_rtx_REG (Pmode, 1);
9430 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9431 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9433 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9434 if (flag_pic)
9436 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9437 op[4] = gen_rtx_CONST (Pmode, op[4]);
9440 if (TARGET_64BIT)
9442 output_asm_insn ("stg\t%0,%1", op);
9443 output_asm_insn ("larl\t%2,%3", op);
9444 output_asm_insn ("brasl\t%0,%4", op);
9445 output_asm_insn ("lg\t%0,%1", op);
9447 else if (!flag_pic)
9449 op[6] = gen_label_rtx ();
9451 output_asm_insn ("st\t%0,%1", op);
9452 output_asm_insn ("bras\t%2,%l6", op);
9453 output_asm_insn (".long\t%4", op);
9454 output_asm_insn (".long\t%3", op);
9455 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9456 output_asm_insn ("l\t%0,0(%2)", op);
9457 output_asm_insn ("l\t%2,4(%2)", op);
9458 output_asm_insn ("basr\t%0,%0", op);
9459 output_asm_insn ("l\t%0,%1", op);
9461 else
9463 op[5] = gen_label_rtx ();
9464 op[6] = gen_label_rtx ();
9466 output_asm_insn ("st\t%0,%1", op);
9467 output_asm_insn ("bras\t%2,%l6", op);
9468 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9469 output_asm_insn (".long\t%4-%l5", op);
9470 output_asm_insn (".long\t%3-%l5", op);
9471 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9472 output_asm_insn ("lr\t%0,%2", op);
9473 output_asm_insn ("a\t%0,0(%2)", op);
9474 output_asm_insn ("a\t%2,4(%2)", op);
9475 output_asm_insn ("basr\t%0,%0", op);
9476 output_asm_insn ("l\t%0,%1", op);
9480 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9481 into its SYMBOL_REF_FLAGS. */
9483 static void
9484 s390_encode_section_info (tree decl, rtx rtl, int first)
9486 default_encode_section_info (decl, rtl, first);
9488 if (TREE_CODE (decl) == VAR_DECL)
9490 /* If a variable has a forced alignment to < 2 bytes, mark it
9491 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a
9492 LARL operand. */
9493 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9494 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9495 if (!DECL_SIZE (decl)
9496 || !DECL_ALIGN (decl)
9497 || !host_integerp (DECL_SIZE (decl), 0)
9498 || (DECL_ALIGN (decl) <= 64
9499 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9500 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9503 /* Literal pool references don't have a decl so they are handled
9504 differently here. We rely on the information in the MEM_ALIGN
9505 entry to decide upon natural alignment. */
9506 if (MEM_P (rtl)
9507 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9508 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9509 && (MEM_ALIGN (rtl) == 0
9510 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9511 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9512 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9515 /* Output thunk to FILE that implements a C++ virtual function call (with
9516 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9517 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9518 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9519 relative to the resulting this pointer. */
9521 static void
9522 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9523 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9524 tree function)
9526 rtx op[10];
9527 int nonlocal = 0;
9529 /* Make sure unwind info is emitted for the thunk if needed. */
9530 final_start_function (emit_barrier (), file, 1);
9532 /* Operand 0 is the target function. */
9533 op[0] = XEXP (DECL_RTL (function), 0);
9534 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9536 nonlocal = 1;
9537 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9538 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9539 op[0] = gen_rtx_CONST (Pmode, op[0]);
9542 /* Operand 1 is the 'this' pointer. */
9543 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9544 op[1] = gen_rtx_REG (Pmode, 3);
9545 else
9546 op[1] = gen_rtx_REG (Pmode, 2);
9548 /* Operand 2 is the delta. */
9549 op[2] = GEN_INT (delta);
9551 /* Operand 3 is the vcall_offset. */
9552 op[3] = GEN_INT (vcall_offset);
9554 /* Operand 4 is the temporary register. */
9555 op[4] = gen_rtx_REG (Pmode, 1);
9557 /* Operands 5 to 8 can be used as labels. */
9558 op[5] = NULL_RTX;
9559 op[6] = NULL_RTX;
9560 op[7] = NULL_RTX;
9561 op[8] = NULL_RTX;
9563 /* Operand 9 can be used for temporary register. */
9564 op[9] = NULL_RTX;
9566 /* Generate code. */
9567 if (TARGET_64BIT)
9569 /* Setup literal pool pointer if required. */
9570 if ((!DISP_IN_RANGE (delta)
9571 && !CONST_OK_FOR_K (delta)
9572 && !CONST_OK_FOR_Os (delta))
9573 || (!DISP_IN_RANGE (vcall_offset)
9574 && !CONST_OK_FOR_K (vcall_offset)
9575 && !CONST_OK_FOR_Os (vcall_offset)))
9577 op[5] = gen_label_rtx ();
9578 output_asm_insn ("larl\t%4,%5", op);
9581 /* Add DELTA to this pointer. */
9582 if (delta)
9584 if (CONST_OK_FOR_J (delta))
9585 output_asm_insn ("la\t%1,%2(%1)", op);
9586 else if (DISP_IN_RANGE (delta))
9587 output_asm_insn ("lay\t%1,%2(%1)", op);
9588 else if (CONST_OK_FOR_K (delta))
9589 output_asm_insn ("aghi\t%1,%2", op);
9590 else if (CONST_OK_FOR_Os (delta))
9591 output_asm_insn ("agfi\t%1,%2", op);
9592 else
9594 op[6] = gen_label_rtx ();
9595 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9599 /* Perform vcall adjustment. */
9600 if (vcall_offset)
9602 if (DISP_IN_RANGE (vcall_offset))
9604 output_asm_insn ("lg\t%4,0(%1)", op);
9605 output_asm_insn ("ag\t%1,%3(%4)", op);
9607 else if (CONST_OK_FOR_K (vcall_offset))
9609 output_asm_insn ("lghi\t%4,%3", op);
9610 output_asm_insn ("ag\t%4,0(%1)", op);
9611 output_asm_insn ("ag\t%1,0(%4)", op);
9613 else if (CONST_OK_FOR_Os (vcall_offset))
9615 output_asm_insn ("lgfi\t%4,%3", op);
9616 output_asm_insn ("ag\t%4,0(%1)", op);
9617 output_asm_insn ("ag\t%1,0(%4)", op);
9619 else
9621 op[7] = gen_label_rtx ();
9622 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9623 output_asm_insn ("ag\t%4,0(%1)", op);
9624 output_asm_insn ("ag\t%1,0(%4)", op);
9628 /* Jump to target. */
9629 output_asm_insn ("jg\t%0", op);
9631 /* Output literal pool if required. */
9632 if (op[5])
9634 output_asm_insn (".align\t4", op);
9635 targetm.asm_out.internal_label (file, "L",
9636 CODE_LABEL_NUMBER (op[5]));
9638 if (op[6])
9640 targetm.asm_out.internal_label (file, "L",
9641 CODE_LABEL_NUMBER (op[6]));
9642 output_asm_insn (".long\t%2", op);
9644 if (op[7])
9646 targetm.asm_out.internal_label (file, "L",
9647 CODE_LABEL_NUMBER (op[7]));
9648 output_asm_insn (".long\t%3", op);
9651 else
9653 /* Setup base pointer if required. */
9654 if (!vcall_offset
9655 || (!DISP_IN_RANGE (delta)
9656 && !CONST_OK_FOR_K (delta)
9657 && !CONST_OK_FOR_Os (delta))
9658 || (!DISP_IN_RANGE (delta)
9659 && !CONST_OK_FOR_K (vcall_offset)
9660 && !CONST_OK_FOR_Os (vcall_offset)))
9662 op[5] = gen_label_rtx ();
9663 output_asm_insn ("basr\t%4,0", op);
9664 targetm.asm_out.internal_label (file, "L",
9665 CODE_LABEL_NUMBER (op[5]));
9668 /* Add DELTA to this pointer. */
9669 if (delta)
9671 if (CONST_OK_FOR_J (delta))
9672 output_asm_insn ("la\t%1,%2(%1)", op);
9673 else if (DISP_IN_RANGE (delta))
9674 output_asm_insn ("lay\t%1,%2(%1)", op);
9675 else if (CONST_OK_FOR_K (delta))
9676 output_asm_insn ("ahi\t%1,%2", op);
9677 else if (CONST_OK_FOR_Os (delta))
9678 output_asm_insn ("afi\t%1,%2", op);
9679 else
9681 op[6] = gen_label_rtx ();
9682 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9686 /* Perform vcall adjustment. */
9687 if (vcall_offset)
9689 if (CONST_OK_FOR_J (vcall_offset))
9691 output_asm_insn ("l\t%4,0(%1)", op);
9692 output_asm_insn ("a\t%1,%3(%4)", op);
9694 else if (DISP_IN_RANGE (vcall_offset))
9696 output_asm_insn ("l\t%4,0(%1)", op);
9697 output_asm_insn ("ay\t%1,%3(%4)", op);
9699 else if (CONST_OK_FOR_K (vcall_offset))
9701 output_asm_insn ("lhi\t%4,%3", op);
9702 output_asm_insn ("a\t%4,0(%1)", op);
9703 output_asm_insn ("a\t%1,0(%4)", op);
9705 else if (CONST_OK_FOR_Os (vcall_offset))
9707 output_asm_insn ("iilf\t%4,%3", op);
9708 output_asm_insn ("a\t%4,0(%1)", op);
9709 output_asm_insn ("a\t%1,0(%4)", op);
9711 else
9713 op[7] = gen_label_rtx ();
9714 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9715 output_asm_insn ("a\t%4,0(%1)", op);
9716 output_asm_insn ("a\t%1,0(%4)", op);
9719 /* We had to clobber the base pointer register.
9720 Re-setup the base pointer (with a different base). */
9721 op[5] = gen_label_rtx ();
9722 output_asm_insn ("basr\t%4,0", op);
9723 targetm.asm_out.internal_label (file, "L",
9724 CODE_LABEL_NUMBER (op[5]));
9727 /* Jump to target. */
9728 op[8] = gen_label_rtx ();
9730 if (!flag_pic)
9731 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9732 else if (!nonlocal)
9733 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9734 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9735 else if (flag_pic == 1)
9737 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9738 output_asm_insn ("l\t%4,%0(%4)", op);
9740 else if (flag_pic == 2)
9742 op[9] = gen_rtx_REG (Pmode, 0);
9743 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9744 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9745 output_asm_insn ("ar\t%4,%9", op);
9746 output_asm_insn ("l\t%4,0(%4)", op);
9749 output_asm_insn ("br\t%4", op);
9751 /* Output literal pool. */
9752 output_asm_insn (".align\t4", op);
9754 if (nonlocal && flag_pic == 2)
9755 output_asm_insn (".long\t%0", op);
9756 if (nonlocal)
9758 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9759 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9762 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9763 if (!flag_pic)
9764 output_asm_insn (".long\t%0", op);
9765 else
9766 output_asm_insn (".long\t%0-%5", op);
9768 if (op[6])
9770 targetm.asm_out.internal_label (file, "L",
9771 CODE_LABEL_NUMBER (op[6]));
9772 output_asm_insn (".long\t%2", op);
9774 if (op[7])
9776 targetm.asm_out.internal_label (file, "L",
9777 CODE_LABEL_NUMBER (op[7]));
9778 output_asm_insn (".long\t%3", op);
9781 final_end_function ();
9784 static bool
9785 s390_valid_pointer_mode (enum machine_mode mode)
9787 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9790 /* Checks whether the given CALL_EXPR would use a call-saved
9791 register for argument passing. This is used to decide whether
9792 sibling call optimization could be performed on the respective
9793 function call. */
9795 static bool
9796 s390_call_saved_register_used (tree call_expr)
9798 CUMULATIVE_ARGS cum_v;
9799 cumulative_args_t cum;
9800 tree parameter;
9801 enum machine_mode mode;
9802 tree type;
9803 rtx parm_rtx;
9804 int reg, i;
9806 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9807 cum = pack_cumulative_args (&cum_v);
9809 for (i = 0; i < call_expr_nargs (call_expr); i++)
9811 parameter = CALL_EXPR_ARG (call_expr, i);
9812 gcc_assert (parameter);
9814 /* For an undeclared variable passed as a parameter we will get
9815 an ERROR_MARK node here. */
9816 if (TREE_CODE (parameter) == ERROR_MARK)
9817 return true;
9819 type = TREE_TYPE (parameter);
9820 gcc_assert (type);
9822 mode = TYPE_MODE (type);
9823 gcc_assert (mode);
9825 if (pass_by_reference (&cum_v, mode, type, true))
9827 mode = Pmode;
9828 type = build_pointer_type (type);
9831 parm_rtx = s390_function_arg (cum, mode, type, 0);
9833 s390_function_arg_advance (cum, mode, type, 0);
9835 if (!parm_rtx)
9836 continue;
9838 if (REG_P (parm_rtx))
9840 for (reg = 0;
9841 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9842 reg++)
9843 if (!call_used_regs[reg + REGNO (parm_rtx)])
9844 return true;
9847 if (GET_CODE (parm_rtx) == PARALLEL)
9849 int i;
9851 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9853 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9855 gcc_assert (REG_P (r));
9857 for (reg = 0;
9858 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9859 reg++)
9860 if (!call_used_regs[reg + REGNO (r)])
9861 return true;
9866 return false;
9869 /* Return true if the given call expression can be
9870 turned into a sibling call.
9871 DECL holds the declaration of the function to be called whereas
9872 EXP is the call expression itself. */
9874 static bool
9875 s390_function_ok_for_sibcall (tree decl, tree exp)
9877 /* The TPF epilogue uses register 1. */
9878 if (TARGET_TPF_PROFILING)
9879 return false;
9881 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9882 which would have to be restored before the sibcall. */
9883 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9884 return false;
9886 /* Register 6 on s390 is available as an argument register but is
9887 unfortunately call-saved (the callee must preserve it). This makes
9888 functions needing this register for arguments unsuitable for sibcalls. */
9889 return !s390_call_saved_register_used (exp);
9892 /* Return the fixed registers used for condition codes. */
9894 static bool
9895 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9897 *p1 = CC_REGNUM;
9898 *p2 = INVALID_REGNUM;
9900 return true;
9903 /* This function is used by the call expanders of the machine description.
9904 It emits the call insn itself together with the necessary operations
9905 to adjust the target address and returns the emitted insn.
9906 ADDR_LOCATION is the target address rtx
9907 TLS_CALL the location of the thread-local symbol
9908 RESULT_REG the register where the result of the call should be stored
9909 RETADDR_REG the register where the return address should be stored
9910 If this parameter is NULL_RTX the call is considered
9911 to be a sibling call. */
9914 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9915 rtx retaddr_reg)
9917 bool plt_call = false;
9918 rtx insn;
9919 rtx call;
9920 rtx clobber;
9921 rtvec vec;
9923 /* Direct function calls need special treatment. */
9924 if (GET_CODE (addr_location) == SYMBOL_REF)
9926 /* When calling a global routine in PIC mode, we must
9927 replace the symbol itself with the PLT stub. */
9928 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9930 if (retaddr_reg != NULL_RTX)
9932 addr_location = gen_rtx_UNSPEC (Pmode,
9933 gen_rtvec (1, addr_location),
9934 UNSPEC_PLT);
9935 addr_location = gen_rtx_CONST (Pmode, addr_location);
9936 plt_call = true;
9938 else
9939 /* For -fpic code the PLT entries might use r12 which is
9940 call-saved. Therefore we cannot do a sibcall when
9941 calling directly using a symbol ref. When reaching
9942 this point we decided (in s390_function_ok_for_sibcall)
9943 to do a sibcall for a function pointer but one of the
9944 optimizers was able to get rid of the function pointer
9945 by propagating the symbol ref into the call. This
9946 optimization is illegal for S/390 so we turn the direct
9947 call into an indirect call again. */
9948 addr_location = force_reg (Pmode, addr_location);
9951 /* Unless we can use the bras(l) insn, force the
9952 routine address into a register. */
9953 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9955 if (flag_pic)
9956 addr_location = legitimize_pic_address (addr_location, 0);
9957 else
9958 addr_location = force_reg (Pmode, addr_location);
9962 /* If it is already an indirect call or the code above moved the
9963 SYMBOL_REF to somewhere else make sure the address can be found in
9964 register 1. */
9965 if (retaddr_reg == NULL_RTX
9966 && GET_CODE (addr_location) != SYMBOL_REF
9967 && !plt_call)
9969 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9970 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9973 addr_location = gen_rtx_MEM (QImode, addr_location);
9974 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9976 if (result_reg != NULL_RTX)
9977 call = gen_rtx_SET (VOIDmode, result_reg, call);
9979 if (retaddr_reg != NULL_RTX)
9981 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9983 if (tls_call != NULL_RTX)
9984 vec = gen_rtvec (3, call, clobber,
9985 gen_rtx_USE (VOIDmode, tls_call));
9986 else
9987 vec = gen_rtvec (2, call, clobber);
9989 call = gen_rtx_PARALLEL (VOIDmode, vec);
9992 insn = emit_call_insn (call);
9994 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9995 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9997 /* s390_function_ok_for_sibcall should
9998 have denied sibcalls in this case. */
9999 gcc_assert (retaddr_reg != NULL_RTX);
10000 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
10002 return insn;
10005 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
10007 static void
10008 s390_conditional_register_usage (void)
10010 int i;
10012 if (flag_pic)
10014 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10015 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10017 if (TARGET_CPU_ZARCH)
10019 fixed_regs[BASE_REGNUM] = 0;
10020 call_used_regs[BASE_REGNUM] = 0;
10021 fixed_regs[RETURN_REGNUM] = 0;
10022 call_used_regs[RETURN_REGNUM] = 0;
10024 if (TARGET_64BIT)
10026 for (i = 24; i < 32; i++)
10027 call_used_regs[i] = call_really_used_regs[i] = 0;
10029 else
10031 for (i = 18; i < 20; i++)
10032 call_used_regs[i] = call_really_used_regs[i] = 0;
10035 if (TARGET_SOFT_FLOAT)
10037 for (i = 16; i < 32; i++)
10038 call_used_regs[i] = fixed_regs[i] = 1;
10042 /* Corresponding function to eh_return expander. */
10044 static GTY(()) rtx s390_tpf_eh_return_symbol;
10045 void
10046 s390_emit_tpf_eh_return (rtx target)
10048 rtx insn, reg;
10050 if (!s390_tpf_eh_return_symbol)
10051 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10053 reg = gen_rtx_REG (Pmode, 2);
10055 emit_move_insn (reg, target);
10056 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10057 gen_rtx_REG (Pmode, RETURN_REGNUM));
10058 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10060 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10063 /* Rework the prologue/epilogue to avoid saving/restoring
10064 registers unnecessarily. */
10066 static void
10067 s390_optimize_prologue (void)
10069 rtx insn, new_insn, next_insn;
10071 /* Do a final recompute of the frame-related data. */
10073 s390_update_frame_layout ();
10075 /* If all special registers are in fact used, there's nothing we
10076 can do, so no point in walking the insn list. */
10078 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10079 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10080 && (TARGET_CPU_ZARCH
10081 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10082 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10083 return;
10085 /* Search for prologue/epilogue insns and replace them. */
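/* A store- or load-multiple emitted by the prologue/epilogue expanders
   may cover more GPRs than the final frame layout requires.  Re-emit
   such insns with the narrower register range, or delete them entirely
   when no GPR needs to be saved or restored anymore.  */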
10087 for (insn = get_insns (); insn; insn = next_insn)
10089 int first, last, off;
10090 rtx set, base, offset;
10092 next_insn = NEXT_INSN (insn);
10094 if (! NONJUMP_INSN_P (insn))
10095 continue;
10097 if (GET_CODE (PATTERN (insn)) == PARALLEL
10098 && store_multiple_operation (PATTERN (insn), VOIDmode))
10100 set = XVECEXP (PATTERN (insn), 0, 0);
10101 first = REGNO (SET_SRC (set));
10102 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10103 offset = const0_rtx;
10104 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10105 off = INTVAL (offset);
10107 if (GET_CODE (base) != REG || off < 0)
10108 continue;
10109 if (cfun_frame_layout.first_save_gpr != -1
10110 && (cfun_frame_layout.first_save_gpr < first
10111 || cfun_frame_layout.last_save_gpr > last))
10112 continue;
10113 if (REGNO (base) != STACK_POINTER_REGNUM
10114 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10115 continue;
10116 if (first > BASE_REGNUM || last < BASE_REGNUM)
10117 continue;
10119 if (cfun_frame_layout.first_save_gpr != -1)
10121 new_insn = save_gprs (base,
10122 off + (cfun_frame_layout.first_save_gpr
10123 - first) * UNITS_PER_LONG,
10124 cfun_frame_layout.first_save_gpr,
10125 cfun_frame_layout.last_save_gpr);
10126 new_insn = emit_insn_before (new_insn, insn);
10127 INSN_ADDRESSES_NEW (new_insn, -1);
10130 remove_insn (insn);
10131 continue;
10134 if (cfun_frame_layout.first_save_gpr == -1
10135 && GET_CODE (PATTERN (insn)) == SET
10136 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10137 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10138 || (!TARGET_CPU_ZARCH
10139 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10140 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10142 set = PATTERN (insn);
10143 first = REGNO (SET_SRC (set));
10144 offset = const0_rtx;
10145 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10146 off = INTVAL (offset);
10148 if (GET_CODE (base) != REG || off < 0)
10149 continue;
10150 if (REGNO (base) != STACK_POINTER_REGNUM
10151 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10152 continue;
10154 remove_insn (insn);
10155 continue;
10158 if (GET_CODE (PATTERN (insn)) == PARALLEL
10159 && load_multiple_operation (PATTERN (insn), VOIDmode))
10161 set = XVECEXP (PATTERN (insn), 0, 0);
10162 first = REGNO (SET_DEST (set));
10163 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10164 offset = const0_rtx;
10165 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10166 off = INTVAL (offset);
10168 if (GET_CODE (base) != REG || off < 0)
10169 continue;
10170 if (cfun_frame_layout.first_restore_gpr != -1
10171 && (cfun_frame_layout.first_restore_gpr < first
10172 || cfun_frame_layout.last_restore_gpr > last))
10173 continue;
10174 if (REGNO (base) != STACK_POINTER_REGNUM
10175 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10176 continue;
10177 if (first > BASE_REGNUM || last < BASE_REGNUM)
10178 continue;
10180 if (cfun_frame_layout.first_restore_gpr != -1)
10182 new_insn = restore_gprs (base,
10183 off + (cfun_frame_layout.first_restore_gpr
10184 - first) * UNITS_PER_LONG,
10185 cfun_frame_layout.first_restore_gpr,
10186 cfun_frame_layout.last_restore_gpr);
10187 new_insn = emit_insn_before (new_insn, insn);
10188 INSN_ADDRESSES_NEW (new_insn, -1);
10191 remove_insn (insn);
10192 continue;
10195 if (cfun_frame_layout.first_restore_gpr == -1
10196 && GET_CODE (PATTERN (insn)) == SET
10197 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10198 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10199 || (!TARGET_CPU_ZARCH
10200 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10201 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10203 set = PATTERN (insn);
10204 first = REGNO (SET_DEST (set));
10205 offset = const0_rtx;
10206 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10207 off = INTVAL (offset);
10209 if (GET_CODE (base) != REG || off < 0)
10210 continue;
10211 if (REGNO (base) != STACK_POINTER_REGNUM
10212 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10213 continue;
10215 remove_insn (insn);
10216 continue;
10221 /* On z10 and later the dynamic branch prediction must see the
10222 backward jump within a certain windows. If not it falls back to
10223 the static prediction. This function rearranges the loop backward
10224 branch in a way which makes the static prediction always correct.
10225 The function returns true if it added an instruction. */
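/* Rough illustration of the rewrite performed here:

      L:  ...loop body...              L:  ...loop body...
          ...                 ==>          ...
          jCC   L                          jNCC  NEW
                                           j     L
                                      NEW:

   The far backward branch becomes unconditional (and therefore trivially
   predicted), while the remaining conditional branch is a short forward
   jump to NEW.  */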
10226 static bool
10227 s390_fix_long_loop_prediction (rtx insn)
10229 rtx set = single_set (insn);
10230 rtx code_label, label_ref, new_label;
10231 rtx uncond_jump;
10232 rtx cur_insn;
10233 rtx tmp;
10234 int distance;
10236 /* This will exclude branch on count and branch on index patterns
10237 since these are correctly statically predicted. */
10238 if (!set
10239 || SET_DEST (set) != pc_rtx
10240 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10241 return false;
10243 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10244 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10246 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10248 code_label = XEXP (label_ref, 0);
10250 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10251 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10252 || (INSN_ADDRESSES (INSN_UID (insn))
10253 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10254 return false;
10256 for (distance = 0, cur_insn = PREV_INSN (insn);
10257 distance < PREDICT_DISTANCE - 6;
10258 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10259 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10260 return false;
10262 new_label = gen_label_rtx ();
10263 uncond_jump = emit_jump_insn_after (
10264 gen_rtx_SET (VOIDmode, pc_rtx,
10265 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10266 insn);
10267 emit_label_after (new_label, uncond_jump);
10269 tmp = XEXP (SET_SRC (set), 1);
10270 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10271 XEXP (SET_SRC (set), 2) = tmp;
10272 INSN_CODE (insn) = -1;
10274 XEXP (label_ref, 0) = new_label;
10275 JUMP_LABEL (insn) = new_label;
10276 JUMP_LABEL (uncond_jump) = code_label;
10278 return true;
10281 /* Returns 1 if INSN reads the value of REG for purposes not related
10282 to addressing of memory, and 0 otherwise. */
10283 static int
10284 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10286 return reg_referenced_p (reg, PATTERN (insn))
10287 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10290 /* Starting from INSN find_cond_jump looks downwards in the insn
10291 stream for a single jump insn which is the last user of the
10292 condition code set in INSN. */
10293 static rtx
10294 find_cond_jump (rtx insn)
10296 for (; insn; insn = NEXT_INSN (insn))
10298 rtx ite, cc;
10300 if (LABEL_P (insn))
10301 break;
10303 if (!JUMP_P (insn))
10305 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10306 break;
10307 continue;
10310 /* This will be triggered by a return. */
10311 if (GET_CODE (PATTERN (insn)) != SET)
10312 break;
10314 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10315 ite = SET_SRC (PATTERN (insn));
10317 if (GET_CODE (ite) != IF_THEN_ELSE)
10318 break;
10320 cc = XEXP (XEXP (ite, 0), 0);
10321 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10322 break;
10324 if (find_reg_note (insn, REG_DEAD, cc))
10325 return insn;
10326 break;
10329 return NULL_RTX;
10332 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10333 the semantics does not change. If NULL_RTX is passed as COND the
10334 function tries to find the conditional jump starting with INSN. */
10335 static void
10336 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10338 rtx tmp = *op0;
10340 if (cond == NULL_RTX)
10342 rtx jump = find_cond_jump (NEXT_INSN (insn));
10343 jump = jump ? single_set (jump) : NULL_RTX;
10345 if (jump == NULL_RTX)
10346 return;
10348 cond = XEXP (XEXP (jump, 1), 0);
10351 *op0 = *op1;
10352 *op1 = tmp;
10353 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10356 /* On z10, instructions of the compare-and-branch family have the
10357 property of accessing the register occurring as the second operand with
10358 its bits complemented. If such a compare is grouped with a second
10359 instruction that accesses the same register non-complemented, and
10360 if that register's value is delivered via a bypass, then the
10361 pipeline recycles, thereby causing significant performance decline.
10362 This function locates such situations and exchanges the two
10363 operands of the compare. The function returns true whenever it
10364 added an insn. */
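/* A sketch of the situation handled here: in a group like

       crj   %r1,%r2,...    ; %r2 is the complemented second operand
       lr    %r3,%r2        ; %r2 is read non-complemented

   the compare operands (and the condition) are swapped so that %r1
   becomes the second operand, or, if swapping is not possible, a NOP is
   emitted after the compare to separate the two instructions.  */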
10365 static bool
10366 s390_z10_optimize_cmp (rtx insn)
10368 rtx prev_insn, next_insn;
10369 bool insn_added_p = false;
10370 rtx cond, *op0, *op1;
10372 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10374 /* Handle compare and branch and branch on count
10375 instructions. */
10376 rtx pattern = single_set (insn);
10378 if (!pattern
10379 || SET_DEST (pattern) != pc_rtx
10380 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10381 return false;
10383 cond = XEXP (SET_SRC (pattern), 0);
10384 op0 = &XEXP (cond, 0);
10385 op1 = &XEXP (cond, 1);
10387 else if (GET_CODE (PATTERN (insn)) == SET)
10389 rtx src, dest;
10391 /* Handle normal compare instructions. */
10392 src = SET_SRC (PATTERN (insn));
10393 dest = SET_DEST (PATTERN (insn));
10395 if (!REG_P (dest)
10396 || !CC_REGNO_P (REGNO (dest))
10397 || GET_CODE (src) != COMPARE)
10398 return false;
10400 /* s390_swap_cmp will try to find the conditional
10401 jump when passing NULL_RTX as condition. */
10402 cond = NULL_RTX;
10403 op0 = &XEXP (src, 0);
10404 op1 = &XEXP (src, 1);
10406 else
10407 return false;
10409 if (!REG_P (*op0) || !REG_P (*op1))
10410 return false;
10412 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10413 return false;
10415 /* Swap the COMPARE arguments and its mask if there is a
10416 conflicting access in the previous insn. */
10417 prev_insn = prev_active_insn (insn);
10418 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10419 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10420 s390_swap_cmp (cond, op0, op1, insn);
10422 /* Check if there is a conflict with the next insn. If there
10423 was no conflict with the previous insn, then swap the
10424 COMPARE arguments and its mask. If we already swapped
10425 the operands, or if swapping them would cause a conflict
10426 with the previous insn, issue a NOP after the COMPARE in
10427 order to separate the two instructions. */
10428 next_insn = next_active_insn (insn);
10429 if (next_insn != NULL_RTX && INSN_P (next_insn)
10430 && s390_non_addr_reg_read_p (*op1, next_insn))
10432 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10433 && s390_non_addr_reg_read_p (*op0, prev_insn))
10435 if (REGNO (*op1) == 0)
10436 emit_insn_after (gen_nop1 (), insn);
10437 else
10438 emit_insn_after (gen_nop (), insn);
10439 insn_added_p = true;
10441 else
10442 s390_swap_cmp (cond, op0, op1, insn);
10444 return insn_added_p;
10447 /* Perform machine-dependent processing. */
10449 static void
10450 s390_reorg (void)
10452 bool pool_overflow = false;
10454 /* Make sure all splits have been performed; splits after
10455 machine_dependent_reorg might confuse insn length counts. */
10456 split_all_insns_noflow ();
10458 /* Install the main literal pool and the associated base
10459 register load insns.
10461 In addition, there are two problematic situations we need
10462 to correct:
10464 - the literal pool might be > 4096 bytes in size, so that
10465 some of its elements cannot be directly accessed
10467 - a branch target might be > 64K away from the branch, so that
10468 it is not possible to use a PC-relative instruction.
10470 To fix those, we split the single literal pool into multiple
10471 pool chunks, reloading the pool base register at various
10472 points throughout the function to ensure it always points to
10473 the pool chunk the following code expects, and / or replace
10474 PC-relative branches by absolute branches.
10476 However, the two problems are interdependent: splitting the
10477 literal pool can move a branch further away from its target,
10478 causing the 64K limit to overflow, and on the other hand,
10479 replacing a PC-relative branch by an absolute branch means
10480 we need to put the branch target address into the literal
10481 pool, possibly causing it to overflow.
10483 So, we loop trying to fix up both problems until we manage
10484 to satisfy both conditions at the same time. Note that the
10485 loop is guaranteed to terminate as every pass of the loop
10486 strictly decreases the total number of PC-relative branches
10487 in the function. (This is not completely true as there
10488 might be branch-over-pool insns introduced by chunkify_start.
10489 Those never need to be split however.) */
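/* The fix-up loop below, roughly:

       for (;;)
         {
           build the main literal pool (chunked if it overflowed before);
           if (an out-of-range branch had to be split)
             cancel the pool and retry;
           else
             commit the pool and stop;
         }  */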
10491 for (;;)
10493 struct constant_pool *pool = NULL;
10495 /* Collect the literal pool. */
10496 if (!pool_overflow)
10498 pool = s390_mainpool_start ();
10499 if (!pool)
10500 pool_overflow = true;
10503 /* If literal pool overflowed, start to chunkify it. */
10504 if (pool_overflow)
10505 pool = s390_chunkify_start ();
10507 /* Split out-of-range branches. If this has created new
10508 literal pool entries, cancel current chunk list and
10509 recompute it. zSeries machines have large branch
10510 instructions, so we never need to split a branch. */
10511 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10513 if (pool_overflow)
10514 s390_chunkify_cancel (pool);
10515 else
10516 s390_mainpool_cancel (pool);
10518 continue;
10521 /* If we made it up to here, both conditions are satisfied.
10522 Finish up literal pool related changes. */
10523 if (pool_overflow)
10524 s390_chunkify_finish (pool);
10525 else
10526 s390_mainpool_finish (pool);
10528 /* We're done splitting branches. */
10529 cfun->machine->split_branches_pending_p = false;
10530 break;
10533 /* Generate out-of-pool execute target insns. */
10534 if (TARGET_CPU_ZARCH)
10536 rtx insn, label, target;
10538 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10540 label = s390_execute_label (insn);
10541 if (!label)
10542 continue;
10544 gcc_assert (label != const0_rtx);
10546 target = emit_label (XEXP (label, 0));
10547 INSN_ADDRESSES_NEW (target, -1);
10549 target = emit_insn (s390_execute_target (insn));
10550 INSN_ADDRESSES_NEW (target, -1);
10554 /* Try to optimize prologue and epilogue further. */
10555 s390_optimize_prologue ();
10557 /* Walk over the insns and do some >=z10 specific changes. */
10558 if (s390_tune == PROCESSOR_2097_Z10
10559 || s390_tune == PROCESSOR_2817_Z196
10560 || s390_tune == PROCESSOR_2827_ZEC12)
10562 rtx insn;
10563 bool insn_added_p = false;
10565 /* The insn lengths and addresses have to be up to date for the
10566 following manipulations. */
10567 shorten_branches (get_insns ());
10569 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10571 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10572 continue;
10574 if (JUMP_P (insn))
10575 insn_added_p |= s390_fix_long_loop_prediction (insn);
10577 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10578 || GET_CODE (PATTERN (insn)) == SET)
10579 && s390_tune == PROCESSOR_2097_Z10)
10580 insn_added_p |= s390_z10_optimize_cmp (insn);
10583 /* Adjust branches if we added new instructions. */
10584 if (insn_added_p)
10585 shorten_branches (get_insns ());
10589 /* Return true if INSN is a fp load insn writing register REGNO. */
10590 static inline bool
10591 s390_fpload_toreg (rtx insn, unsigned int regno)
10593 rtx set;
10594 enum attr_type flag = s390_safe_attr_type (insn);
10596 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10597 return false;
10599 set = single_set (insn);
10601 if (set == NULL_RTX)
10602 return false;
10604 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10605 return false;
10607 if (REGNO (SET_DEST (set)) != regno)
10608 return false;
10610 return true;
10613 /* This value describes the distance to be avoided between an
10614 arithmetic fp instruction and an fp load writing the same register.
10615 Both Z10_EARLYLOAD_DISTANCE - 1 and Z10_EARLYLOAD_DISTANCE + 1 are
10616 fine, but the exact value has to be avoided. Otherwise the FP
10617 pipeline will throw an exception causing a major penalty. */
10618 #define Z10_EARLYLOAD_DISTANCE 7
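/* For example, an FP multiply setting %f2 followed exactly 7 active insns
   later by an FP load into %f2 hits the penalty; a gap of 6 or 8 insns is
   harmless.  */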
10620 /* Rearrange the ready list in order to avoid the situation described
10621 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10622 moved to the very end of the ready list. */
10623 static void
10624 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10626 unsigned int regno;
10627 int nready = *nready_p;
10628 rtx tmp;
10629 int i;
10630 rtx insn;
10631 rtx set;
10632 enum attr_type flag;
10633 int distance;
10635 /* Skip DISTANCE - 1 active insns. */
10636 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10637 distance > 0 && insn != NULL_RTX;
10638 distance--, insn = prev_active_insn (insn))
10639 if (CALL_P (insn) || JUMP_P (insn))
10640 return;
10642 if (insn == NULL_RTX)
10643 return;
10645 set = single_set (insn);
10647 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10648 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10649 return;
10651 flag = s390_safe_attr_type (insn);
10653 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10654 return;
10656 regno = REGNO (SET_DEST (set));
10657 i = nready - 1;
10659 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10660 i--;
10662 if (!i)
10663 return;
10665 tmp = ready[i];
10666 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10667 ready[0] = tmp;
10671 /* The s390_sched_state variable tracks the state of the current or
10672 the last instruction group.
10674 0,1,2 number of instructions scheduled in the current group
10675 3 the last group is complete - normal insns
10676 4 the last group was a cracked/expanded insn */
10678 static int s390_sched_state;
10680 #define S390_OOO_SCHED_STATE_NORMAL 3
10681 #define S390_OOO_SCHED_STATE_CRACKED 4
10683 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
10684 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
10685 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
10686 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
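/* Example of the resulting transitions (see s390_sched_variable_issue
   below): from state 0 three normal insns advance the state 1 -> 2 -> 3
   (NORMAL); a cracked or expanded insn switches to CRACKED; an endgroup
   or group-alone insn switches to NORMAL; a normal insn issued while in
   state NORMAL starts a new group and resets the state to 1.  */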
10688 static unsigned int
10689 s390_get_sched_attrmask (rtx insn)
10691 unsigned int mask = 0;
10693 if (get_attr_ooo_cracked (insn))
10694 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
10695 if (get_attr_ooo_expanded (insn))
10696 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
10697 if (get_attr_ooo_endgroup (insn))
10698 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
10699 if (get_attr_ooo_groupalone (insn))
10700 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
10701 return mask;
10704 /* Return the scheduling score for INSN. The higher the score the
10705 better. The score is calculated from the OOO scheduling attributes
10706 of INSN and the scheduling state s390_sched_state. */
10707 static int
10708 s390_sched_score (rtx insn)
10710 unsigned int mask = s390_get_sched_attrmask (insn);
10711 int score = 0;
10713 switch (s390_sched_state)
10715 case 0:
10716 /* Try to put insns into the first slot which would otherwise
10717 break a group. */
10718 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10719 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10720 score += 5;
10721 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10722 score += 10;
10723 case 1:
10724 /* Prefer not cracked insns while trying to put together a
10725 group. */
10726 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10727 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10728 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10729 score += 10;
10730 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
10731 score += 5;
10732 break;
10733 case 2:
10734 /* Prefer not cracked insns while trying to put together a
10735 group. */
10736 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10737 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10738 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10739 score += 10;
10740 /* Prefer endgroup insns in the last slot. */
10741 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
10742 score += 10;
10743 break;
10744 case S390_OOO_SCHED_STATE_NORMAL:
10745 /* Prefer not cracked insns if the last was not cracked. */
10746 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10747 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
10748 score += 5;
10749 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10750 score += 10;
10751 break;
10752 case S390_OOO_SCHED_STATE_CRACKED:
10753 /* Try to keep cracked insns together to prevent them from
10754 interrupting groups. */
10755 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10756 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10757 score += 5;
10758 break;
10760 return score;
10763 /* This function is called via hook TARGET_SCHED_REORDER before
10764 issuing one insn from list READY, which contains *NREADYP entries.
10765 For target z10 it reorders load instructions to avoid early load
10766 conflicts in the floating point pipeline. */
10767 static int
10768 s390_sched_reorder (FILE *file, int verbose,
10769 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10771 if (s390_tune == PROCESSOR_2097_Z10)
10772 if (reload_completed && *nreadyp > 1)
10773 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10775 if (s390_tune == PROCESSOR_2827_ZEC12
10776 && reload_completed
10777 && *nreadyp > 1)
10779 int i;
10780 int last_index = *nreadyp - 1;
10781 int max_index = -1;
10782 int max_score = -1;
10783 rtx tmp;
10785 /* Just move the insn with the highest score to the top (the
10786 end) of the list. A full sort is not needed since a conflict
10787 in the hazard recognition cannot happen. So the top insn in
10788 the ready list will always be taken. */
10789 for (i = last_index; i >= 0; i--)
10791 int score;
10793 if (recog_memoized (ready[i]) < 0)
10794 continue;
10796 score = s390_sched_score (ready[i]);
10797 if (score > max_score)
10799 max_score = score;
10800 max_index = i;
10804 if (max_index != -1)
10806 if (max_index != last_index)
10808 tmp = ready[max_index];
10809 ready[max_index] = ready[last_index];
10810 ready[last_index] = tmp;
10812 if (verbose > 5)
10813 fprintf (file,
10814 "move insn %d to the top of list\n",
10815 INSN_UID (ready[last_index]));
10817 else if (verbose > 5)
10818 fprintf (file,
10819 "best insn %d already on top\n",
10820 INSN_UID (ready[last_index]));
10823 if (verbose > 5)
10825 fprintf (file, "ready list ooo attributes - sched state: %d\n",
10826 s390_sched_state);
10828 for (i = last_index; i >= 0; i--)
10830 if (recog_memoized (ready[i]) < 0)
10831 continue;
10832 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
10833 s390_sched_score (ready[i]));
10834 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
10835 PRINT_OOO_ATTR (ooo_cracked);
10836 PRINT_OOO_ATTR (ooo_expanded);
10837 PRINT_OOO_ATTR (ooo_endgroup);
10838 PRINT_OOO_ATTR (ooo_groupalone);
10839 #undef PRINT_OOO_ATTR
10840 fprintf (file, "\n");
10845 return s390_issue_rate ();
10849 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10850 the scheduler has issued INSN. It stores the last issued insn into
10851 last_scheduled_insn in order to make it available for
10852 s390_sched_reorder. */
10853 static int
10854 s390_sched_variable_issue (FILE *file, int verbose, rtx insn, int more)
10856 last_scheduled_insn = insn;
10858 if (s390_tune == PROCESSOR_2827_ZEC12
10859 && reload_completed
10860 && recog_memoized (insn) >= 0)
10862 unsigned int mask = s390_get_sched_attrmask (insn);
10864 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10865 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10866 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
10867 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
10868 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10869 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10870 else
10872 /* Only normal insns are left (mask == 0). */
10873 switch (s390_sched_state)
10875 case 0:
10876 case 1:
10877 case 2:
10878 case S390_OOO_SCHED_STATE_NORMAL:
10879 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
10880 s390_sched_state = 1;
10881 else
10882 s390_sched_state++;
10884 break;
10885 case S390_OOO_SCHED_STATE_CRACKED:
10886 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10887 break;
10890 if (verbose > 5)
10892 fprintf (file, "insn %d: ", INSN_UID (insn));
10893 #define PRINT_OOO_ATTR(ATTR) \
10894 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
10895 PRINT_OOO_ATTR (ooo_cracked);
10896 PRINT_OOO_ATTR (ooo_expanded);
10897 PRINT_OOO_ATTR (ooo_endgroup);
10898 PRINT_OOO_ATTR (ooo_groupalone);
10899 #undef PRINT_OOO_ATTR
10900 fprintf (file, "\n");
10901 fprintf (file, "sched state: %d\n", s390_sched_state);
10905 if (GET_CODE (PATTERN (insn)) != USE
10906 && GET_CODE (PATTERN (insn)) != CLOBBER)
10907 return more - 1;
10908 else
10909 return more;
10912 static void
10913 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10914 int verbose ATTRIBUTE_UNUSED,
10915 int max_ready ATTRIBUTE_UNUSED)
10917 last_scheduled_insn = NULL_RTX;
10918 s390_sched_state = 0;
10921 /* This function checks the whole of insn X for memory references. The
10922 function always returns zero because the framework it is called
10923 from would stop recursively analyzing the insn upon a return value
10924 other than zero. The real result of this function is updating the
10925 counter variable MEM_COUNT. */
10926 static int
10927 check_dpu (rtx *x, unsigned *mem_count)
10929 if (*x != NULL_RTX && MEM_P (*x))
10930 (*mem_count)++;
10931 return 0;
10934 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10935 a new number of times LOOP should be unrolled when tuning for cpus with
10936 a built-in stride prefetcher.
10937 The loop is analyzed for memory accesses by calling check_dpu for
10938 each rtx of the loop. Depending on the loop_depth and the number of
10939 memory accesses, a new number <= nunroll is returned to improve the
10940 behaviour of the hardware prefetch unit. */
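/* For instance, a loop of depth 1 containing 4 memory references is
   unrolled at most MIN (nunroll, 28 / 4) = 7 times.  */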
10941 static unsigned
10942 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10944 basic_block *bbs;
10945 rtx insn;
10946 unsigned i;
10947 unsigned mem_count = 0;
10949 if (s390_tune != PROCESSOR_2097_Z10
10950 && s390_tune != PROCESSOR_2817_Z196
10951 && s390_tune != PROCESSOR_2827_ZEC12)
10952 return nunroll;
10954 /* Count the number of memory references within the loop body. */
10955 bbs = get_loop_body (loop);
10956 for (i = 0; i < loop->num_nodes; i++)
10958 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10959 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10960 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10962 free (bbs);
10964 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10965 if (mem_count == 0)
10966 return nunroll;
10968 switch (loop_depth(loop))
10970 case 1:
10971 return MIN (nunroll, 28 / mem_count);
10972 case 2:
10973 return MIN (nunroll, 22 / mem_count);
10974 default:
10975 return MIN (nunroll, 16 / mem_count);
10979 /* Initialize GCC target structure. */
10981 #undef TARGET_ASM_ALIGNED_HI_OP
10982 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10983 #undef TARGET_ASM_ALIGNED_DI_OP
10984 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10985 #undef TARGET_ASM_INTEGER
10986 #define TARGET_ASM_INTEGER s390_assemble_integer
10988 #undef TARGET_ASM_OPEN_PAREN
10989 #define TARGET_ASM_OPEN_PAREN ""
10991 #undef TARGET_ASM_CLOSE_PAREN
10992 #define TARGET_ASM_CLOSE_PAREN ""
10994 #undef TARGET_OPTION_OVERRIDE
10995 #define TARGET_OPTION_OVERRIDE s390_option_override
10997 #undef TARGET_ENCODE_SECTION_INFO
10998 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
11000 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11001 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
11003 #ifdef HAVE_AS_TLS
11004 #undef TARGET_HAVE_TLS
11005 #define TARGET_HAVE_TLS true
11006 #endif
11007 #undef TARGET_CANNOT_FORCE_CONST_MEM
11008 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
11010 #undef TARGET_DELEGITIMIZE_ADDRESS
11011 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
11013 #undef TARGET_LEGITIMIZE_ADDRESS
11014 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
11016 #undef TARGET_RETURN_IN_MEMORY
11017 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
11019 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
11020 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
11022 #undef TARGET_ASM_OUTPUT_MI_THUNK
11023 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
11024 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11025 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11027 #undef TARGET_SCHED_ADJUST_PRIORITY
11028 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
11029 #undef TARGET_SCHED_ISSUE_RATE
11030 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
11031 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11032 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
11034 #undef TARGET_SCHED_VARIABLE_ISSUE
11035 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
11036 #undef TARGET_SCHED_REORDER
11037 #define TARGET_SCHED_REORDER s390_sched_reorder
11038 #undef TARGET_SCHED_INIT
11039 #define TARGET_SCHED_INIT s390_sched_init
11041 #undef TARGET_CANNOT_COPY_INSN_P
11042 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
11043 #undef TARGET_RTX_COSTS
11044 #define TARGET_RTX_COSTS s390_rtx_costs
11045 #undef TARGET_ADDRESS_COST
11046 #define TARGET_ADDRESS_COST s390_address_cost
11047 #undef TARGET_REGISTER_MOVE_COST
11048 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
11049 #undef TARGET_MEMORY_MOVE_COST
11050 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
11052 #undef TARGET_MACHINE_DEPENDENT_REORG
11053 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
11055 #undef TARGET_VALID_POINTER_MODE
11056 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
11058 #undef TARGET_BUILD_BUILTIN_VA_LIST
11059 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
11060 #undef TARGET_EXPAND_BUILTIN_VA_START
11061 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
11062 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11063 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
11065 #undef TARGET_PROMOTE_FUNCTION_MODE
11066 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
11067 #undef TARGET_PASS_BY_REFERENCE
11068 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
11070 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11071 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
11072 #undef TARGET_FUNCTION_ARG
11073 #define TARGET_FUNCTION_ARG s390_function_arg
11074 #undef TARGET_FUNCTION_ARG_ADVANCE
11075 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
11076 #undef TARGET_FUNCTION_VALUE
11077 #define TARGET_FUNCTION_VALUE s390_function_value
11078 #undef TARGET_LIBCALL_VALUE
11079 #define TARGET_LIBCALL_VALUE s390_libcall_value
11081 #undef TARGET_FIXED_CONDITION_CODE_REGS
11082 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
11084 #undef TARGET_CC_MODES_COMPATIBLE
11085 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
11087 #undef TARGET_INVALID_WITHIN_DOLOOP
11088 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
11090 #ifdef HAVE_AS_TLS
11091 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
11092 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
11093 #endif
11095 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11096 #undef TARGET_MANGLE_TYPE
11097 #define TARGET_MANGLE_TYPE s390_mangle_type
11098 #endif
11100 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11101 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
11103 #undef TARGET_PREFERRED_RELOAD_CLASS
11104 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
11106 #undef TARGET_SECONDARY_RELOAD
11107 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
11109 #undef TARGET_LIBGCC_CMP_RETURN_MODE
11110 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
11112 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
11113 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
11115 #undef TARGET_LEGITIMATE_ADDRESS_P
11116 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
11118 #undef TARGET_LEGITIMATE_CONSTANT_P
11119 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
11121 #undef TARGET_LRA_P
11122 #define TARGET_LRA_P s390_lra_p
11124 #undef TARGET_CAN_ELIMINATE
11125 #define TARGET_CAN_ELIMINATE s390_can_eliminate
11127 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11128 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
11130 #undef TARGET_LOOP_UNROLL_ADJUST
11131 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
11133 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
11134 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
11135 #undef TARGET_TRAMPOLINE_INIT
11136 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
11138 #undef TARGET_UNWIND_WORD_MODE
11139 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
11141 #undef TARGET_CANONICALIZE_COMPARISON
11142 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
11144 struct gcc_target targetm = TARGET_INITIALIZER;
11146 #include "gt-s390.h"