/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2015 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "double-int.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "statistics.h"
#include "fixed-value.h"
#include "diagnostic-core.h"
#include "dominance.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "insn-codes.h"
#include "hash-table.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "tree-pass.h"
/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};

const struct processor_costs *s390_cost;
static const
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR */
  COSTS_N_INSNS (30),    /* DDBR */
  COSTS_N_INSNS (27),    /* DEBR */
  COSTS_N_INSNS (220),   /* DLGR */
  COSTS_N_INSNS (34),    /* DLR */
  COSTS_N_INSNS (34),    /* DR */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR */
};
static const
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (176),   /* DLGR */
  COSTS_N_INSNS (31),    /* DLR */
  COSTS_N_INSNS (31),    /* DR */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR */
};
static const
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (30),    /* DLGR */
  COSTS_N_INSNS (23),    /* DLR */
  COSTS_N_INSNS (23),    /* DR */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR */
};
static const
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR */
  COSTS_N_INSNS (39),    /* DDBR */
  COSTS_N_INSNS (32),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR */
  COSTS_N_INSNS (71),    /* DLR */
  COSTS_N_INSNS (71),    /* DR */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR */
};
static const
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (101),   /* DXBR B+101 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
static const
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (131),   /* DXBR B+131 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
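
/* Illustrative sketch (not part of the original text): further down
   the file the backend points s390_cost at one of the tables above
   according to -mtune.  The PROCESSOR_* enumerators below follow the
   usual s390 backend naming and are assumptions here:

     switch (s390_tune)
       {
       case PROCESSOR_2084_Z990:   s390_cost = &z990_cost;   break;
       case PROCESSOR_2094_Z9_109: s390_cost = &z9_109_cost; break;
       case PROCESSOR_2097_Z10:    s390_cost = &z10_cost;    break;
       case PROCESSOR_2817_Z196:   s390_cost = &z196_cost;   break;
       case PROCESSOR_2827_ZEC12:  s390_cost = &zEC12_cost;  break;
       default:                    s390_cost = &z900_cost;
       }  */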
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx_insn *last_scheduled_insn;
/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
        base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */

struct s390_address
{
  rtx base;
  rtx indx;
  rtx disp;
  bool pointer;
  bool literal_pool;
};
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Number of first and last gpr where slots in the register
     save area are reserved for.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
       0 - does not need to be saved at all
      -1 - stack slot
      >0 - save it to the given FPR  */
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers. Set, if the
     respective register has to be saved. Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;

  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     the FPRs.  */
  bool tbegin_p;
};
/* Few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_save_arg_fprs_p (!!(TARGET_64BIT                           \
                                 ? cfun_frame_layout.fpr_bitmap & 0x0f  \
                                 : cfun_frame_layout.fpr_bitmap & 0x03))
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (REGNO - FPR0_REGNUM)))
#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (REGNO - FPR0_REGNUM))))
#define cfun_gpr_save_slot(REGNO) \
  cfun->machine->frame_layout.gpr_save_slots[REGNO]
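
/* Worked example (illustrative, not part of the original text):
   FPR0_REGNUM + 2 is hard reg 18, i.e. f4 in the interleaved S/390
   FPR numbering shown in the fpr_bitmap table above, so

     cfun_set_fpr_save (FPR0_REGNUM + 2);

   sets bit 2 of fpr_bitmap, after which
   cfun_fpr_save_p (FPR0_REGNUM + 2) yields true while
   cfun_fpr_save_p (FPR0_REGNUM) still yields false.  */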
/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE)                               \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))

/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
static const int s390_hotpatch_hw_max = 1000000;
static int s390_hotpatch_hw_before_label = 0;
static int s390_hotpatch_hw_after_label = 0;
/* Check whether the hotpatch attribute is applied to a function and, if it has
   an argument, the argument is valid.  */

static tree
s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree expr;
  tree expr2;
  int err;

  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }
  if (args != NULL && TREE_CHAIN (args) != NULL)
    {
      expr = TREE_VALUE (args);
      expr2 = TREE_VALUE (TREE_CHAIN (args));
    }
  if (args == NULL || TREE_CHAIN (args) == NULL)
    err = 1;
  else if (TREE_CODE (expr) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
           || wi::gtu_p (expr, s390_hotpatch_hw_max))
    err = 1;
  else if (TREE_CODE (expr2) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
           || wi::gtu_p (expr2, s390_hotpatch_hw_max))
    err = 1;
  else
    err = 0;
  if (err)
    {
      error ("requested %qE attribute is not a comma separated pair of"
             " non-negative integer constants or too large (max. %d)", name,
             s390_hotpatch_hw_max);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute,
    false },
  { NULL, 0, 0, false, false, false, NULL, false }
};
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
int
s390_label_align (rtx label)
{
  rtx_insn *prev_insn = prev_active_insn (label);
  rtx set, src;

  if (prev_insn == NULL_RTX)
    return align_labels_log;

  set = single_set (prev_insn);

  if (set == NULL_RTX)
    return align_labels_log;

  src = SET_SRC (set);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (src) == UNSPEC
      && XINT (src, 1) == UNSPEC_MAIN_BASE)
    return 0;

  return align_labels_log;
}
static machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */
static bool
s390_scalar_mode_supported_p (machine_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on 31bit
     TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}
/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

static void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static machine_mode
s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;

  switch (m1)
    {
    case CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
          || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
        return m2;
      return VOIDmode;

    case CCUmode:
    case CCTmode:
    case CCZ1mode:
    case CCSmode:
    case CCSRmode:
    case CCURmode:
      if (m2 == CCZmode)
        return m1;
      return VOIDmode;

    default:
      return VOIDmode;
    }
}
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, machine_mode req_mode)
{
  machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return true;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCSmode:
    case CCSRmode:
    case CCUmode:
    case CCURmode:
    case CCLmode:
    case CCL1mode:
    case CCL2mode:
    case CCL3mode:
    case CCT1mode:
    case CCT2mode:
    case CCT3mode:
      if (req_mode != set_mode)
        return false;
      break;

    case CCZmode:
      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
          && req_mode != CCSRmode && req_mode != CCURmode)
        return false;
      break;

    case CCAPmode:
    case CCANmode:
      if (req_mode != CCAmode)
        return false;
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

static bool
s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
        rtx set = XVECEXP (PATTERN (insn), 0, i);
        if (GET_CODE (set) == SET)
          if (!s390_match_ccmode_set (set, req_mode))
            return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_DOUBLE as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16)  -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2  */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
        return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
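
/* Worked example (illustrative, not part of the original text): for
   if ((a & (16 + 128)) == 16) we get OP1 == 144, OP2 == 16 and
   MIXED == true.  Then bit1 = exact_log2 (16) = 4 and
   bit0 = exact_log2 (144 ^ 16) = exact_log2 (128) = 7; since
   bit0 > bit1 the function returns CCT1mode.  With OP2 == 128 the
   roles swap and it returns CCT2mode, matching the comment above.  */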
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  switch (code)
    {
      case EQ:
      case NE:
        if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
            && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
          return CCAPmode;
        if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
            && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
          return CCAPmode;
        if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
             || GET_CODE (op1) == NEG)
            && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
          return CCLmode;

        if (GET_CODE (op0) == AND)
          {
            /* Check whether we can potentially do it via TM.  */
            machine_mode ccmode;
            ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
            if (ccmode != VOIDmode)
              {
                /* Relax CCTmode to CCZmode to allow fall-back to AND
                   if that turns out to be beneficial.  */
                return ccmode == CCTmode ? CCZmode : ccmode;
              }
          }

        if (register_operand (op0, HImode)
            && GET_CODE (op1) == CONST_INT
            && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
          return CCT3mode;
        if (register_operand (op0, QImode)
            && GET_CODE (op1) == CONST_INT
            && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
          return CCT3mode;

        return CCZmode;

      case LE:
      case LT:
      case GE:
      case GT:
        /* The only overflow condition of NEG and ABS happens when
           -INT_MAX is used as parameter, which stays negative. So
           we have an overflow from a positive value to a negative.
           Using CCAP mode the resulting cc can be used for comparisons.  */
        if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
            && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
          return CCAPmode;

        /* If constants are involved in an add instruction it is possible to use
           the resulting cc for comparisons with zero. Knowing the sign of the
           constant the overflow behavior gets predictable. e.g.:
             int a, b; if ((b = a + c) > 0)
           with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
        if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
            && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
                || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
                    /* Avoid INT32_MIN on 32 bit.  */
                    && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
          {
            if (INTVAL (XEXP((op0), 1)) < 0)
              return CCANmode;
            else
              return CCAPmode;
          }
        /* Fall through.  */
      case UNORDERED:
      case ORDERED:
      case UNEQ:
      case UNLE:
      case UNLT:
      case UNGE:
      case UNGT:
      case LTGT:
        if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
            && GET_CODE (op1) != CONST_INT)
          return CCSRmode;
        return CCSmode;

      case LTU:
      case GEU:
        if (GET_CODE (op0) == PLUS
            && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
          return CCL1mode;

        if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
            && GET_CODE (op1) != CONST_INT)
          return CCURmode;
        return CCUmode;

      case LEU:
      case GTU:
        if (GET_CODE (op0) == MINUS
            && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
          return CCL2mode;

        if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
            && GET_CODE (op1) != CONST_INT)
          return CCURmode;
        return CCUmode;

      default:
        gcc_unreachable ();
    }
}
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
                              bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
          && pos >= 0 && pos + len <= modesize
          && modesize <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT block;
          block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
          block <<= modesize - pos - len;

          *op0 = gen_rtx_AND (GET_MODE (inner), inner,
                              gen_int_mode (block, GET_MODE (inner)));
        }
    }

  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
          && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
          && (GET_MODE_SIZE (GET_MODE (inner))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
          && ((INTVAL (mask)
               & GET_MODE_MASK (GET_MODE (inner))
               & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
              == 0))
        inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
        {
          int part = s390_single_part (XEXP (*op0, 1),
                                       GET_MODE (inner), QImode, 0);
          if (part >= 0)
            {
              mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
              inner = adjust_address_nv (inner, QImode, part);
              *op0 = gen_rtx_AND (QImode, inner, mask);
            }
        }
    }

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
          & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }

  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
        {
          case EQ: new_code = EQ;  break;
          case NE: new_code = NE;  break;
          case LT: new_code = GTU; break;
          case GT: new_code = LTU; break;
          case LE: new_code = GEU; break;
          case GE: new_code = LEU; break;
          default: break;
        }

      if (new_code != UNKNOWN)
        {
          *op0 = XVECEXP (*op0, 0, 0);
          *code = new_code;
        }
    }

  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
    {
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
        {
        case CCZmode:
        case CCRAWmode:
          switch (*code)
            {
            case EQ: new_code = EQ;  break;
            case NE: new_code = NE;  break;
            default: break;
            }
          break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          /* For CCRAWmode put the required cc mask into the second
             operand.  */
          if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
              && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
            *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
          *op0 = XVECEXP (*op0, 0, 0);
          *code = new_code;
        }
    }

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
          || (*code == NE && GET_CODE (*op0) == EQ))
        *code = EQ;
      else
        *code = NE;
      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int)swap_condition ((enum rtx_code)*code);
    }
}
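
/* Worked example (illustrative, not part of the original text): a
   SImode (zero_extract ... 4 8) compared against zero, i.e.
   len == 4 and pos == 8 counted from the most significant bit,
   yields

     block = ((1 << 4) - 1) << (32 - 8 - 4)  ==  0x00f00000,

   so the comparison is rewritten as (and:SI x 0x00f00000) == 0,
   which the TM patterns can pick up.  */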
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  /* Do not output a redundant compare instruction if a compare_and_swap
     pattern already computed the result and the machine modes are compatible.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
                  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
                            rtx cmp, rtx new_rtx)
{
  emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
                            const0_rtx);
}
/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND.  */

rtx_insn *
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
  return emit_jump_insn (insn);
}
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
              || (GET_MODE (XEXP (code, 0)) == CCRAWmode
                  && CONST_INT_P (XEXP (code, 1))));

  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        default:        return -1;
        }
      break;

    case CCT1mode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC1;
        case NE:        return CC0 | CC2 | CC3;
        default:        return -1;
        }
      break;

    case CCT2mode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC2;
        case NE:        return CC0 | CC1 | CC3;
        default:        return -1;
        }
      break;

    case CCT3mode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC3;
        case NE:        return CC0 | CC1 | CC2;
        default:        return -1;
        }
      break;

    case CCLmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0 | CC2;
        case NE:        return CC1 | CC3;
        default:        return -1;
        }
      break;

    case CCL1mode:
      switch (GET_CODE (code))
        {
        case LTU:       return CC2 | CC3;  /* carry */
        case GEU:       return CC0 | CC1;  /* no carry */
        default:        return -1;
        }
      break;

    case CCL2mode:
      switch (GET_CODE (code))
        {
        case GTU:       return CC0 | CC1;  /* borrow */
        case LEU:       return CC2 | CC3;  /* no borrow */
        default:        return -1;
        }
      break;

    case CCL3mode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0 | CC2;
        case NE:        return CC1 | CC3;
        case LTU:       return CC1;
        case GTU:       return CC3;
        case LEU:       return CC1 | CC2;
        case GEU:       return CC2 | CC3;
        default:        return -1;
        }

    case CCUmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LTU:       return CC1;
        case GTU:       return CC2;
        case LEU:       return CC0 | CC1;
        case GEU:       return CC0 | CC2;
        default:        return -1;
        }
      break;

    case CCURmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC2 | CC1 | CC3;
        case LTU:       return CC2;
        case GTU:       return CC1;
        case LEU:       return CC0 | CC2;
        case GEU:       return CC0 | CC1;
        default:        return -1;
        }
      break;

    case CCAPmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1 | CC3;
        case GT:        return CC2;
        case LE:        return CC0 | CC1 | CC3;
        case GE:        return CC0 | CC2;
        default:        return -1;
        }
      break;

    case CCANmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1;
        case GT:        return CC2 | CC3;
        case LE:        return CC0 | CC1;
        case GE:        return CC0 | CC2 | CC3;
        default:        return -1;
        }
      break;

    case CCSmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1;
        case GT:        return CC2;
        case LE:        return CC0 | CC1;
        case GE:        return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC1 | CC2;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC1 | CC3;
        case UNGT:      return CC2 | CC3;
        case UNLE:      return CC0 | CC1 | CC3;
        case UNGE:      return CC0 | CC2 | CC3;
        case LTGT:      return CC1 | CC2;
        default:        return -1;
        }
      break;

    case CCSRmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC2 | CC1 | CC3;
        case LT:        return CC2;
        case GT:        return CC1;
        case LE:        return CC0 | CC2;
        case GE:        return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC2 | CC1;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC2 | CC3;
        case UNGT:      return CC1 | CC3;
        case UNLE:      return CC0 | CC2 | CC3;
        case UNGE:      return CC0 | CC1 | CC3;
        case LTGT:      return CC2 | CC1;
        default:        return -1;
        }
      break;

    case CCRAWmode:
      switch (GET_CODE (code))
        {
        case EQ:
          return INTVAL (XEXP (code, 1));
        case NE:
          return (INTVAL (XEXP (code, 1))) ^ 0xf;
        default:
          return -1;
        }

    default:
      return -1;
    }
}
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ:
      return CC0;
    case NE:
      return CC1 | CC2;
    case LT:
    case LTU:
      return CC1;
    case GT:
    case GTU:
      return CC2;
    case LE:
    case LEU:
      return CC0 | CC1;
    case GE:
    case GEU:
      return CC0 | CC2;
    default:
      return -1;
    }
}
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
          || (GET_MODE (XEXP (code, 0)) == CCRAWmode
              && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
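
/* Worked example (illustrative, not part of the original text): for
   (eq (reg CC_REGNUM) (const_int 0)) in CCZmode,
   s390_branch_condition_mask returns CC0 == 8, so the mnemonic is
   mnemonic[8] == "e".  With INV set the mask becomes 8 ^ 15 == 7 and
   the inverted mnemonic is mnemonic[7] == "ne".  */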
/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
    }

  gcc_unreachable ();
}
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part. Otherwise, return -1.  */

int
s390_single_part (rtx op,
                  machine_mode mode,
                  machine_mode part_mode,
                  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
        {
          if (part != -1)
            return -1;
          else
            part = i;
        }
    }

  return part == -1 ? -1 : n_parts - 1 - part;
}
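
/* Worked example (illustrative, not part of the original text): for
   op == GEN_INT (0xffff), mode == DImode, part_mode == SImode and
   def == 0, only the low 32-bit part differs from DEF, so the loop
   sets part = 0 and the function returns n_parts - 1 - part == 1,
   i.e. parts are numbered starting from the most significant one.  */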
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in IN.  POS and LENGTH can be used
   to obtain the start position and the length of the bitfield.

   POS gives the position of the first bit of the bitfield counting
   from the lowest order bit starting with zero.  In order to use this
   value for S/390 instructions this has to be converted to "bits big
   endian" style.  */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
                           int *pos, int *length)
{
  int tmp_pos = 0;
  int tmp_length = 0;
  int i;
  unsigned HOST_WIDE_INT mask = 1ULL;
  bool contiguous = false;

  for (i = 0; i < size; mask <<= 1, i++)
    {
      if (contiguous)
        {
          if (mask & in)
            tmp_length++;
          else
            break;
        }
      else
        {
          if (mask & in)
            {
              contiguous = true;
              tmp_length++;
            }
          else
            tmp_pos++;
        }
    }

  if (!tmp_length)
    return false;

  /* Calculate a mask for all bits beyond the contiguous bits.  */
  mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));

  if (mask & in)
    return false;

  if (tmp_length + tmp_pos - 1 > size)
    return false;

  if (length)
    *length = tmp_length;

  if (pos)
    *pos = tmp_pos;

  return true;
}
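
/* Worked example (illustrative, not part of the original text): for
   in == 0x78 (binary 0111 1000) and size == 64, the loop skips three
   zero bits and then counts four contiguous one bits; no bits are
   set beyond them, so the function returns true with *pos == 3 and
   *length == 4.  */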
/* Check whether a rotate of ROTL followed by an AND of CONTIG is
   equivalent to a shift followed by the AND.  In particular, CONTIG
   should not overlap the (rotated) bit 0/bit 63 gap.  Negative values
   for ROTL indicate a rotate to the right.  */

bool
s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
{
  int pos, len;
  bool ok;

  ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
  gcc_assert (ok);

  return ((rotl >= 0 && rotl <= pos)
          || (rotl < 0 && -rotl <= bitsize - len - pos));
}
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
{
  /* Floating point registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
        return false;
    }

  return true;
}
/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
   and [MEM2, MEM2 + SIZE] do overlap and false
   otherwise (SIZE in bytes).  */

bool
s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
{
  rtx addr1, addr2, addr_delta;
  HOST_WIDE_INT delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return true;

  if (size == 0)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* This overlapping check is used by peepholes merging memory block operations.
     Overlapping operations would otherwise be recognized by the S/390 hardware
     and would fall back to a slower implementation. Allowing overlapping
     operations would lead to slow code but not to wrong code. Therefore we are
     somewhat optimistic if we cannot prove that the memory blocks are
     overlapping.
     That's why we return false here although this may accept operations on
     overlapping memory areas.  */
  if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
    return false;

  delta = INTVAL (addr_delta);

  if (delta == 0
      || (delta > 0 && delta < size)
      || (delta < 0 && -delta < size))
    return true;

  return false;
}
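
/* Worked example (illustrative, not part of the original text): for
   two blocks of SIZE == 8 based on the same register at offsets 0
   and 4, addr_delta folds to (const_int 4); since 0 < 4 < 8 the
   blocks provably overlap and the function returns true.  With
   offset 8 instead, delta == size and it returns false.  */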
/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

bool
s390_offset_p (rtx mem1, rtx mem2, rtx delta)
{
  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
    return false;

  return true;
}
1608 s390_expand_logical_operator (enum rtx_code code
, machine_mode mode
,
1611 machine_mode wmode
= mode
;
1612 rtx dst
= operands
[0];
1613 rtx src1
= operands
[1];
1614 rtx src2
= operands
[2];
1617 /* If we cannot handle the operation directly, use a temp register. */
1618 if (!s390_logical_operator_ok_p (operands
))
1619 dst
= gen_reg_rtx (mode
);
1621 /* QImode and HImode patterns make sense only if we have a destination
1622 in memory. Otherwise perform the operation in SImode. */
1623 if ((mode
== QImode
|| mode
== HImode
) && GET_CODE (dst
) != MEM
)
1626 /* Widen operands if required. */
1629 if (GET_CODE (dst
) == SUBREG
1630 && (tem
= simplify_subreg (wmode
, dst
, mode
, 0)) != 0)
1632 else if (REG_P (dst
))
1633 dst
= gen_rtx_SUBREG (wmode
, dst
, 0);
1635 dst
= gen_reg_rtx (wmode
);
1637 if (GET_CODE (src1
) == SUBREG
1638 && (tem
= simplify_subreg (wmode
, src1
, mode
, 0)) != 0)
1640 else if (GET_MODE (src1
) != VOIDmode
)
1641 src1
= gen_rtx_SUBREG (wmode
, force_reg (mode
, src1
), 0);
1643 if (GET_CODE (src2
) == SUBREG
1644 && (tem
= simplify_subreg (wmode
, src2
, mode
, 0)) != 0)
1646 else if (GET_MODE (src2
) != VOIDmode
)
1647 src2
= gen_rtx_SUBREG (wmode
, force_reg (mode
, src2
), 0);
1650 /* Emit the instruction. */
1651 op
= gen_rtx_SET (VOIDmode
, dst
, gen_rtx_fmt_ee (code
, wmode
, src1
, src2
));
1652 clob
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (CCmode
, CC_REGNUM
));
1653 emit_insn (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, op
, clob
)));
1655 /* Fix up the destination if needed. */
1656 if (dst
!= operands
[0])
1657 emit_move_insn (operands
[0], gen_lowpart (mode
, dst
));
/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR).  */

bool
s390_logical_operator_ok_p (rtx *operands)
{
  /* If the destination operand is in memory, it needs to coincide
     with one of the source operands.  After reload, it has to be
     the first source operand.  */
  if (GET_CODE (operands[0]) == MEM)
    return rtx_equal_p (operands[0], operands[1])
           || (!reload_completed && rtx_equal_p (operands[0], operands[2]));

  return true;
}
/* Narrow logical operation CODE of memory operand MEMOP with immediate
   operand IMMOP to switch from SS to SI type instructions.  */

void
s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
{
  int def = code == AND ? -1 : 0;
  HOST_WIDE_INT mask;
  int part;

  gcc_assert (GET_CODE (*memop) == MEM);
  gcc_assert (!MEM_VOLATILE_P (*memop));

  mask = s390_extract_part (*immop, QImode, def);
  part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
  gcc_assert (part >= 0);

  *memop = adjust_address (*memop, QImode, part);
  *immop = gen_int_mode (mask, QImode);
}
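
/* Worked example (illustrative, not part of the original text):
   narrowing an SImode AND of a memory operand with 0xffffff00
   (def == -1) finds exactly one QImode part different from all-ones,
   part == 3, the least significant byte on this big-endian target.
   *memop is then adjusted to that single byte and *immop becomes
   0x00, turning an SS-type NC into an SI-type NI.  */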
/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
s390_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* Map for smallest class containing reg regno.  */

const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  ADDR_REGS,    CC_REGS,   ADDR_REGS, ADDR_REGS,
  ACCESS_REGS,  ACCESS_REGS
};
/* Return attribute type of insn.  */

static enum attr_type
s390_safe_attr_type (rtx_insn *insn)
{
  if (recog_memoized (insn) >= 0)
    return get_attr_type (insn);
  else
    return TYPE_NONE;
}
/* Return true if DISP is a valid short displacement.  */

static bool
s390_short_displacement (rtx disp)
{
  /* No displacement is OK.  */
  if (!disp)
    return true;

  /* Without the long displacement facility we don't need to
     distinguish between long and short displacement.  */
  if (!TARGET_LONG_DISPLACEMENT)
    return true;

  /* Integer displacement in range.  */
  if (GET_CODE (disp) == CONST_INT)
    return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;

  /* GOT offset is not OK, the GOT can be large.  */
  if (GET_CODE (disp) == CONST
      && GET_CODE (XEXP (disp, 0)) == UNSPEC
      && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
          || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
    return false;

  /* All other symbolic constants are literal pool references,
     which are OK as the literal pool must be small.  */
  if (GET_CODE (disp) == CONST)
    return true;

  return false;
}
/* Decompose a RTL expression ADDR for a memory address into
   its components, returned in OUT.

   Returns false if ADDR is not a valid memory address, true
   otherwise.  If OUT is NULL, don't return the components,
   but check for validity only.

   Note: Only addresses in canonical form are recognized.
   LEGITIMIZE_ADDRESS should convert non-canonical forms to the
   canonical form so that they will be recognized.  */

static int
s390_decompose_address (rtx addr, struct s390_address *out)
{
  HOST_WIDE_INT offset = 0;
  rtx base = NULL_RTX;
  rtx indx = NULL_RTX;
  rtx disp = NULL_RTX;
  rtx orig_disp;
  bool pointer = false;
  bool base_ptr = false;
  bool indx_ptr = false;
  bool literal_pool = false;

  /* We may need to substitute the literal pool base register into the address
     below.  However, at this point we do not know which register is going to
     be used as base, so we substitute the arg pointer register.  This is going
     to be treated as holding a pointer below -- it shouldn't be used for any
     other purpose.  */
  rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);

  /* Decompose address into base + index + displacement.  */

  if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
    base = addr;

  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      enum rtx_code code0 = GET_CODE (op0);
      enum rtx_code code1 = GET_CODE (op1);

      if (code0 == REG || code0 == UNSPEC)
        {
          if (code1 == REG || code1 == UNSPEC)
            {
              indx = op0;       /* index + base */
              base = op1;
            }
          else
            {
              base = op0;       /* base + displacement */
              disp = op1;
            }
        }

      else if (code0 == PLUS)
        {
          indx = XEXP (op0, 0); /* index + base + disp */
          base = XEXP (op0, 1);
          disp = op1;
        }

      else
        return false;
    }

  else
    disp = addr;                /* displacement */

  /* Extract integer part of displacement.  */
  orig_disp = disp;
  if (disp)
    {
      if (GET_CODE (disp) == CONST_INT)
        {
          offset = INTVAL (disp);
          disp = NULL_RTX;
        }
      else if (GET_CODE (disp) == CONST
               && GET_CODE (XEXP (disp, 0)) == PLUS
               && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
        {
          offset = INTVAL (XEXP (XEXP (disp, 0), 1));
          disp = XEXP (XEXP (disp, 0), 0);
        }
    }

  /* Strip off CONST here to avoid special case tests later.  */
  if (disp && GET_CODE (disp) == CONST)
    disp = XEXP (disp, 0);

  /* We can convert literal pool addresses to
     displacements by basing them off the base register.  */
  if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
    {
      /* Either base or index must be free to hold the base register.  */
      if (!base)
        base = fake_pool_base, literal_pool = true;
      else if (!indx)
        indx = fake_pool_base, literal_pool = true;
      else
        return false;

      /* Mark up the displacement.  */
      disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
                             UNSPEC_LTREL_OFFSET);
    }

  /* Validate base register.  */
  if (base)
    {
      if (GET_CODE (base) == UNSPEC)
        switch (XINT (base, 1))
          {
          case UNSPEC_LTREF:
            if (!disp)
              disp = gen_rtx_UNSPEC (Pmode,
                                     gen_rtvec (1, XVECEXP (base, 0, 0)),
                                     UNSPEC_LTREL_OFFSET);
            else
              return false;

            base = XVECEXP (base, 0, 1);
            break;

          case UNSPEC_LTREL_BASE:
            if (XVECLEN (base, 0) == 1)
              base = fake_pool_base, literal_pool = true;
            else
              base = XVECEXP (base, 0, 1);
            break;

          default:
            return false;
          }

      if (!REG_P (base)
          || (GET_MODE (base) != SImode
              && GET_MODE (base) != Pmode))
        return false;

      if (REGNO (base) == STACK_POINTER_REGNUM
          || REGNO (base) == FRAME_POINTER_REGNUM
          || ((reload_completed || reload_in_progress)
              && frame_pointer_needed
              && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
          || REGNO (base) == ARG_POINTER_REGNUM
          || (flag_pic
              && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
        pointer = base_ptr = true;

      if ((reload_completed || reload_in_progress)
          && base == cfun->machine->base_reg)
        pointer = base_ptr = literal_pool = true;
    }

  /* Validate index register.  */
  if (indx)
    {
      if (GET_CODE (indx) == UNSPEC)
        switch (XINT (indx, 1))
          {
          case UNSPEC_LTREF:
            if (!disp)
              disp = gen_rtx_UNSPEC (Pmode,
                                     gen_rtvec (1, XVECEXP (indx, 0, 0)),
                                     UNSPEC_LTREL_OFFSET);
            else
              return false;

            indx = XVECEXP (indx, 0, 1);
            break;

          case UNSPEC_LTREL_BASE:
            if (XVECLEN (indx, 0) == 1)
              indx = fake_pool_base, literal_pool = true;
            else
              indx = XVECEXP (indx, 0, 1);
            break;

          default:
            return false;
          }

      if (!REG_P (indx)
          || (GET_MODE (indx) != SImode
              && GET_MODE (indx) != Pmode))
        return false;

      if (REGNO (indx) == STACK_POINTER_REGNUM
          || REGNO (indx) == FRAME_POINTER_REGNUM
          || ((reload_completed || reload_in_progress)
              && frame_pointer_needed
              && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
          || REGNO (indx) == ARG_POINTER_REGNUM
          || (flag_pic
              && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
        pointer = indx_ptr = true;

      if ((reload_completed || reload_in_progress)
          && indx == cfun->machine->base_reg)
        pointer = indx_ptr = literal_pool = true;
    }

  /* Prefer to use pointer as base, not index.  */
  if (base && indx && !base_ptr
      && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
    {
      rtx tmp = base;
      base = indx;
      indx = tmp;
    }

  /* Validate displacement.  */
  if (!disp)
    {
      /* If virtual registers are involved, the displacement will change later
         anyway as the virtual registers get eliminated.  This could make a
         valid displacement invalid, but it is more likely to make an invalid
         displacement valid, because we sometimes access the register save area
         via negative offsets to one of those registers.
         Thus we don't check the displacement for validity here.  If after
         elimination the displacement turns out to be invalid after all,
         this is fixed up by reload in any case.  */
      /* LRA maintains always displacements up to date and we need to
         know the displacement is right during all LRA not only at the
         final elimination.  */
      if (lra_in_progress
          || (base != arg_pointer_rtx
              && indx != arg_pointer_rtx
              && base != return_address_pointer_rtx
              && indx != return_address_pointer_rtx
              && base != frame_pointer_rtx
              && indx != frame_pointer_rtx
              && base != virtual_stack_vars_rtx
              && indx != virtual_stack_vars_rtx))
        if (!DISP_IN_RANGE (offset))
          return false;
    }
  else
    {
      /* All the special cases are pointers.  */
      pointer = true;

      /* In the small-PIC case, the linker converts @GOT
         and @GOTNTPOFF offsets to possible displacements.  */
      if (GET_CODE (disp) == UNSPEC
          && (XINT (disp, 1) == UNSPEC_GOT
              || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
          && flag_pic == 1)
        {
          ;
        }

      /* Accept pool label offsets.  */
      else if (GET_CODE (disp) == UNSPEC
               && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
        ;

      /* Accept literal pool references.  */
      else if (GET_CODE (disp) == UNSPEC
               && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
        {
          /* In case CSE pulled a non literal pool reference out of
             the pool we have to reject the address.  This is
             especially important when loading the GOT pointer on non
             zarch CPUs.  In this case the literal pool contains an lt
             relative offset to the _GLOBAL_OFFSET_TABLE_ label which
             will most likely exceed the displacement.  */
          if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
              || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
            return false;

          orig_disp = gen_rtx_CONST (Pmode, disp);
          if (offset)
            {
              /* If we have an offset, make sure it does not
                 exceed the size of the constant pool entry.  */
              rtx sym = XVECEXP (disp, 0, 0);
              if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
                return false;

              orig_disp = plus_constant (Pmode, orig_disp, offset);
            }
        }

      else
        return false;
    }

  if (!base && !indx)
    pointer = true;

  if (out)
    {
      out->base = base;
      out->indx = indx;
      out->disp = orig_disp;
      out->pointer = pointer;
      out->literal_pool = literal_pool;
    }

  return true;
}
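
/* Usage sketch (illustrative, not part of the original text):

     struct s390_address ad;
     rtx addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 2),
                              GEN_INT (40));
     bool ok = s390_decompose_address (addr, &ad);

   On success ad.base is (reg 2), ad.indx is NULL_RTX, and the
   integer displacement 40 is available through ad.disp.  */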
/* Decompose a RTL expression OP for a shift count into its components,
   and return the base register in BASE and the offset in OFFSET.

   Return true if OP is a valid shift count, false if not.  */

bool
s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
{
  HOST_WIDE_INT off = 0;

  /* We can have an integer constant, an address register,
     or a sum of the two.  */
  if (GET_CODE (op) == CONST_INT)
    {
      off = INTVAL (op);
      op = NULL_RTX;
    }
  if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
    {
      off = INTVAL (XEXP (op, 1));
      op = XEXP (op, 0);
    }
  while (op && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (op && GET_CODE (op) != REG)
    return false;

  if (offset)
    *offset = off;
  if (base)
    *base = op;

  return true;
}
2114 s390_legitimate_address_without_index_p (rtx op
)
2116 struct s390_address addr
;
2118 if (!s390_decompose_address (XEXP (op
, 0), &addr
))
2127 /* Return TRUE if ADDR is an operand valid for a load/store relative
2128 instruction. Be aware that the alignment of the operand needs to
2129 be checked separately.
2130 Valid addresses are single references or a sum of a reference and a
2131 constant integer. Return these parts in SYMREF and ADDEND. You can
2132 pass NULL in REF and/or ADDEND if you are not interested in these
2133 values. Literal pool references are *not* considered symbol
2137 s390_loadrelative_operand_p (rtx addr
, rtx
*symref
, HOST_WIDE_INT
*addend
)
2139 HOST_WIDE_INT tmpaddend
= 0;
2141 if (GET_CODE (addr
) == CONST
)
2142 addr
= XEXP (addr
, 0);
2144 if (GET_CODE (addr
) == PLUS
)
2146 if (!CONST_INT_P (XEXP (addr
, 1)))
2149 tmpaddend
= INTVAL (XEXP (addr
, 1));
2150 addr
= XEXP (addr
, 0);
2153 if ((GET_CODE (addr
) == SYMBOL_REF
&& !CONSTANT_POOL_ADDRESS_P (addr
))
2154 || (GET_CODE (addr
) == UNSPEC
2155 && (XINT (addr
, 1) == UNSPEC_GOTENT
2156 || (TARGET_CPU_ZARCH
&& XINT (addr
, 1) == UNSPEC_PLT
))))
2161 *addend
= tmpaddend
;
/* Return true if the address in OP is valid for constraint letter C
   if wrapped in a MEM rtx.  Set LIT_POOL_OK to true if it literal
   pool MEMs should be accepted.  Only the Q, R, S, T constraint
   letters are allowed for C.  */

static int
s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
{
  struct s390_address addr;
  bool decomposed = false;

  /* This check makes sure that no symbolic address (except literal
     pool references) are accepted by the R or T constraints.  */
  if (s390_loadrelative_operand_p (op, NULL, NULL))
    return 0;

  /* Ensure literal pool references are only accepted if LIT_POOL_OK.  */
  if (!lit_pool_ok)
    {
      if (!s390_decompose_address (op, &addr))
        return 0;
      if (addr.literal_pool)
        return 0;
      decomposed = true;
    }

  switch (c)
    {
    case 'Q': /* no index short displacement */
      if (!decomposed && !s390_decompose_address (op, &addr))
        return 0;
      if (addr.indx)
        return 0;
      if (!s390_short_displacement (addr.disp))
        return 0;
      break;

    case 'R': /* with index short displacement */
      if (TARGET_LONG_DISPLACEMENT)
        {
          if (!decomposed && !s390_decompose_address (op, &addr))
            return 0;
          if (!s390_short_displacement (addr.disp))
            return 0;
        }
      /* Any invalid address here will be fixed up by reload,
         so accept it for the most generic constraint.  */
      break;

    case 'S': /* no index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
        return 0;
      if (!decomposed && !s390_decompose_address (op, &addr))
        return 0;
      if (addr.indx)
        return 0;
      if (s390_short_displacement (addr.disp))
        return 0;
      break;

    case 'T': /* with index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
        return 0;
      /* Any invalid address here will be fixed up by reload,
         so accept it for the most generic constraint.  */
      if ((decomposed || s390_decompose_address (op, &addr))
          && s390_short_displacement (addr.disp))
        return 0;
      break;

    default:
      return 0;
    }
  return 1;
}
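
/* Worked example (illustrative, not part of the original text): the
   address (plus (reg 2) (const_int 40)) has no index and a short
   displacement, so it satisfies 'Q' (and the more generic 'R').
   With a displacement of 100000 it is rejected for 'Q'/'R' but,
   given TARGET_LONG_DISPLACEMENT, still satisfies 'S' and 'T'.  */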
/* Evaluates constraint strings described by the regular expression
   ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
   the constraint given in STR, or 0 else.  */

int
s390_mem_constraint (const char *str, rtx op)
{
  char c = str[0];

  switch (c)
    {
    case 'A':
      /* Check for offsettable variants of memory constraints.  */
      if (!MEM_P (op) || MEM_VOLATILE_P (op))
        return 0;
      if ((reload_completed || reload_in_progress)
          ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
        return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), true);
    case 'B':
      /* Check for non-literal-pool variants of memory constraints.  */
      if (!MEM_P (op))
        return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), false);
    case 'Q':
    case 'R':
    case 'S':
    case 'T':
      if (GET_CODE (op) != MEM)
        return 0;
      return s390_check_qrst_address (c, XEXP (op, 0), true);
    case 'U':
      return (s390_check_qrst_address ('Q', op, true)
              || s390_check_qrst_address ('R', op, true));
    case 'W':
      return (s390_check_qrst_address ('S', op, true)
              || s390_check_qrst_address ('T', op, true));
    case 'Y':
      /* Simply check for the basic form of a shift count.  Reload will
         take care of making sure we have a proper base register.  */
      if (!s390_decompose_shift_count (op, NULL, NULL))
        return 0;
      break;
    case 'Z':
      return s390_check_qrst_address (str[1], op, true);
    default:
      return 0;
    }
  return 1;
}
/* Evaluates constraint strings starting with letter O.  Input
   parameter C is the second letter following the "O" in the constraint
   string. Returns 1 if VALUE meets the respective constraint and 0
   otherwise.  */

int
s390_O_constraint_str (const char c, HOST_WIDE_INT value)
{
  if (!TARGET_EXTIMM)
    return 0;

  switch (c)
    {
    case 's':
      return trunc_int_for_mode (value, SImode) == value;

    case 'p':
      return value == 0
        || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;

    case 'n':
      return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;

    default:
      gcc_unreachable ();
    }
}
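
/* Worked examples (illustrative, not part of the original text):
   "Os" accepts any value that fits a signed 32-bit immediate.  "Op"
   accepts e.g. 0x12345678, because only the low SImode part of the
   DImode value is non-zero (s390_single_part returns 1), while
   0x1234567800000000 is rejected.  "On" accepts e.g. -4096:
   subtracting 1 gives 0xffffffffffffefff, whose only SImode part
   different from all-ones is the low one.  */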
/* Evaluates constraint strings starting with letter N.  Parameter STR
   contains the letters following letter "N" in the constraint string.
   Returns true if VALUE matches the constraint.  */

int
s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
{
  machine_mode mode, part_mode;
  int def;
  int part, part_goal;

  if (str[0] == 'x')
    part_goal = -1;
  else
    part_goal = str[0] - '0';

  switch (str[1])
    {
    case 'Q': part_mode = QImode; break;
    case 'H': part_mode = HImode; break;
    case 'S': part_mode = SImode; break;
    default:  return 0;
    }

  switch (str[2])
    {
    case 'H': mode = HImode; break;
    case 'S': mode = SImode; break;
    case 'D': mode = DImode; break;
    default:  return 0;
    }

  switch (str[3])
    {
    case '0': def = 0;  break;
    case 'F': def = -1; break;
    default:  return 0;
    }

  if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
    return 0;

  part = s390_single_part (GEN_INT (value), mode, part_mode, def);
  if (part < 0)
    return 0;
  if (part_goal != -1 && part_goal != part)
    return 0;

  return 1;
}
/* Returns true if the input parameter VALUE is a float zero.  */

int
s390_float_const_zero_p (rtx value)
{
  return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
          && value == CONST0_RTX (GET_MODE (value)));
}
/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
s390_register_move_cost (machine_mode mode,
                         reg_class_t from, reg_class_t to)
{
  /* On s390, copy between fprs and gprs is expensive.  */

  /* It becomes somewhat faster having ldgr/lgdr.  */
  if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
    {
      /* ldgr is single cycle.  */
      if (reg_classes_intersect_p (from, GENERAL_REGS)
          && reg_classes_intersect_p (to, FP_REGS))
        return 1;
      /* lgdr needs 3 cycles.  */
      if (reg_classes_intersect_p (to, GENERAL_REGS)
          && reg_classes_intersect_p (from, FP_REGS))
        return 3;
    }

  /* Otherwise copying is done via memory.  */
  if ((reg_classes_intersect_p (from, GENERAL_REGS)
       && reg_classes_intersect_p (to, FP_REGS))
      || (reg_classes_intersect_p (from, FP_REGS)
          && reg_classes_intersect_p (to, GENERAL_REGS)))
    return 10;

  return 1;
}
/* Implement TARGET_MEMORY_MOVE_COST.  */

static int
s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                       reg_class_t rclass ATTRIBUTE_UNUSED,
                       bool in ATTRIBUTE_UNUSED)
{
  return 1;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.
   CODE contains GET_CODE (x), OUTER_CODE contains the code
   of the superexpression of x.  */

static bool
s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST:
    case CONST_INT:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
    case MEM:
      *total = 0;
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
    case AND:
    case IOR:
    case XOR:
    case NEG:
    case NOT:
      *total = COSTS_N_INSNS (1);
      return false;

    case PLUS:
    case MINUS:
      *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      switch (GET_MODE (x))
	{
	case SImode:
	  {
	    rtx left = XEXP (x, 0);
	    rtx right = XEXP (x, 1);
	    if (GET_CODE (right) == CONST_INT
		&& CONST_OK_FOR_K (INTVAL (right)))
	      *total = s390_cost->mhi;
	    else if (GET_CODE (left) == SIGN_EXTEND)
	      *total = s390_cost->mh;
	    else
	      *total = s390_cost->ms;  /* msr, ms, msy */
	    break;
	  }
	case DImode:
	  {
	    rtx left = XEXP (x, 0);
	    rtx right = XEXP (x, 1);
	    if (TARGET_ZARCH)
	      {
		if (GET_CODE (right) == CONST_INT
		    && CONST_OK_FOR_K (INTVAL (right)))
		  *total = s390_cost->mghi;
		else if (GET_CODE (left) == SIGN_EXTEND)
		  *total = s390_cost->msgf;
		else
		  *total = s390_cost->msg;  /* msgr, msg */
	      }
	    else /* TARGET_31BIT */
	      {
		if (GET_CODE (left) == SIGN_EXTEND
		    && GET_CODE (right) == SIGN_EXTEND)
		  /* mulsidi case: mr, m */
		  *total = s390_cost->m;
		else if (GET_CODE (left) == ZERO_EXTEND
			 && GET_CODE (right) == ZERO_EXTEND
			 && TARGET_CPU_ZARCH)
		  /* umulsidi case: ml, mlr */
		  *total = s390_cost->ml;
		else
		  /* Complex calculation is required.  */
		  *total = COSTS_N_INSNS (40);
	      }
	    break;
	  }
	case SFmode:
	case DFmode:
	  *total = s390_cost->mult_df;
	  break;
	case TFmode:
	  *total = s390_cost->mxbr;
	  break;
	default:
	  return false;
	}
      return false;

    case FMA:
      switch (GET_MODE (x))
	{
	case DFmode:
	  *total = s390_cost->madbr;
	  break;
	case SFmode:
	  *total = s390_cost->maebr;
	  break;
	default:
	  return false;
	}
      /* Negate in the third argument is free: FMSUB.  */
      if (GET_CODE (XEXP (x, 2)) == NEG)
	{
	  *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
		     + rtx_cost (XEXP (x, 1), FMA, 1, speed)
		     + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
	  return true;
	}
      return false;

    case UDIV:
    case UMOD:
      if (GET_MODE (x) == TImode)	       /* 128 bit division */
	*total = s390_cost->dlgr;
      else if (GET_MODE (x) == DImode)
	{
	  rtx right = XEXP (x, 1);
	  if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
	    *total = s390_cost->dlr;
	  else				       /* 64 by 64 bit division */
	    *total = s390_cost->dlgr;
	}
      else if (GET_MODE (x) == SImode)	       /* 32 bit division */
	*total = s390_cost->dlr;
      return false;

    case DIV:
    case MOD:
      if (GET_MODE (x) == DImode)
	{
	  rtx right = XEXP (x, 1);
	  if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
	    if (TARGET_ZARCH)
	      *total = s390_cost->dsgfr;
	    else
	      *total = s390_cost->dr;
	  else				       /* 64 by 64 bit division */
	    *total = s390_cost->dsgr;
	}
      else if (GET_MODE (x) == SImode)	       /* 32 bit division */
	*total = s390_cost->dlr;
      else if (GET_MODE (x) == SFmode)
	*total = s390_cost->debr;
      else if (GET_MODE (x) == DFmode)
	*total = s390_cost->ddbr;
      else if (GET_MODE (x) == TFmode)
	*total = s390_cost->dxbr;
      return false;

    case SQRT:
      if (GET_MODE (x) == SFmode)
	*total = s390_cost->sqebr;
      else if (GET_MODE (x) == DFmode)
	*total = s390_cost->sqdbr;
      else /* TFmode */
	*total = s390_cost->sqxbr;
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (outer_code == MULT || outer_code == DIV || outer_code == MOD
	  || outer_code == PLUS || outer_code == MINUS
	  || outer_code == COMPARE)
	*total = 0;
      return false;

    case COMPARE:
      *total = COSTS_N_INSNS (1);
      if (GET_CODE (XEXP (x, 0)) == AND
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 0);
	  rtx op1 = XEXP (XEXP (x, 0), 1);
	  rtx op2 = XEXP (x, 1);
	  if (memory_operand (op0, GET_MODE (op0))
	      && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
	    return true;
	  if (register_operand (op0, GET_MODE (op0))
	      && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
	    return true;
	}
      return false;

    default:
      return false;
    }
}
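
/* Editor's illustration (not part of the original sources):
   COSTS_N_INSNS (n) expresses "as expensive as n average instructions"
   (rtl.h defines it as n * 4).  So on a CPU whose cost table had, say,
   dxbr = COSTS_N_INSNS (130), a TFmode division would outweigh an add,
   COSTS_N_INSNS (1), by a factor of 130 when the optimizers compare
   alternative expansions; the numbers here are hypothetical.  */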
/* Return the cost of an address rtx ADDR.  */

static int
s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  struct s390_address ad;
  if (!s390_decompose_address (addr, &ad))
    return 1000;

  return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
}
/* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
   otherwise return 0.  */

int
tls_symbolic_operand (rtx op)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  return SYMBOL_REF_TLS_MODEL (op);
}
/* Split DImode access register reference REG (on 64-bit) into its constituent
   low and high parts, and store them into LO and HI.  Note that gen_lowpart/
   gen_highpart cannot be used as they assume all registers are word-sized,
   while our access registers have only half that size.  */

void
s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
{
  gcc_assert (TARGET_64BIT);
  gcc_assert (ACCESS_REG_P (reg));
  gcc_assert (GET_MODE (reg) == DImode);
  gcc_assert (!(REGNO (reg) & 1));

  *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
  *hi = gen_rtx_REG (SImode, REGNO (reg));
}
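
/* Editor's illustration (not part of the original sources): for the
   even/odd access-register pair starting at REGNO (reg), the high 32
   bits live in the even register and the low 32 bits in the odd one,
   so a DImode value held in %a0/%a1 splits into *hi = %a0 and
   *lo = %a1.  */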
/* Return true if OP contains a symbol reference */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return 1;
	}

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return 1;
    }

  return 0;
}
/* Return true if OP contains a reference to a thread-local symbol.  */

bool
tls_symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF)
    return tls_symbolic_operand (op);

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}

      else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}
/* Return true if OP is a legitimate general operand when
   generating PIC code.  It is given that flag_pic is on
   and that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_pic_operand_p (rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Reject everything else; must be handled
     via emit_symbolic_move.  */
  return 0;
}
/* Returns true if the constant value OP is a legitimate general operand.
   It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

static bool
s390_legitimate_constant_p (machine_mode mode, rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Accept immediate LARL operands.  */
  if (TARGET_CPU_ZARCH && larl_operand (op, mode))
    return 1;

  /* Thread-local symbols are never legal constants.  This is
     so that emit_call knows that computing such addresses
     might require a function call.  */
  if (TLS_SYMBOLIC_CONST (op))
    return 0;

  /* In the PIC case, symbolic constants must *not* be
     forced into the literal pool.  We accept them here,
     so that they will be handled by emit_symbolic_move.  */
  if (flag_pic)
    return 1;

  /* All remaining non-PIC symbolic constants are
     forced into the literal pool.  */
  return 1;
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible if X contains the address of a symbol that is
   not constant (TLS) or not known at final link time (PIC).  */

static bool
s390_cannot_force_const_mem (machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
      /* Accept all non-symbolic constants.  */
      return false;

    case LABEL_REF:
      /* Labels are OK iff we are non-PIC.  */
      return flag_pic != 0;

    case SYMBOL_REF:
      /* 'Naked' TLS symbol references are never OK,
	 non-TLS symbols are OK iff we are non-PIC.  */
      if (tls_symbolic_operand (x))
	return true;
      else
	return flag_pic != 0;

    case CONST:
      return s390_cannot_force_const_mem (mode, XEXP (x, 0));
    case PLUS:
    case MINUS:
      return s390_cannot_force_const_mem (mode, XEXP (x, 0))
	     || s390_cannot_force_const_mem (mode, XEXP (x, 1));

    case UNSPEC:
      switch (XINT (x, 1))
	{
	/* Only lt-relative or GOT-relative UNSPECs are OK.  */
	case UNSPEC_LTREL_OFFSET:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLTOFF:
	case UNSPEC_TLSGD:
	case UNSPEC_TLSLDM:
	case UNSPEC_NTPOFF:
	case UNSPEC_DTPOFF:
	case UNSPEC_GOTNTPOFF:
	case UNSPEC_INDNTPOFF:
	  return false;

	/* If the literal pool shares the code section, execute
	   template placeholders must be put into the pool as well.  */
	case UNSPEC_INSN:
	  return TARGET_CPU_ZARCH;

	default:
	  return true;
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Returns true if the constant value OP is a legitimate general
   operand during and after reload.  The difference to
   legitimate_constant_p is that this function will not accept
   a constant that would need to be forced to the literal pool
   before it can be used as operand.
   This function accepts all constants which can be loaded directly
   into a GPR.  */

bool
legitimate_reload_constant_p (rtx op)
{
  /* Accept la(y) operands.  */
  if (GET_CODE (op) == CONST_INT
      && DISP_IN_RANGE (INTVAL (op)))
    return true;

  /* Accept l(g)hi/l(g)fi operands.  */
  if (GET_CODE (op) == CONST_INT
      && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
    return true;

  /* Accept lliXX operands.  */
  if (TARGET_ZARCH
      && GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
      && s390_single_part (op, word_mode, HImode, 0) >= 0)
    return true;

  if (TARGET_EXTIMM
      && GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
      && s390_single_part (op, word_mode, SImode, 0) >= 0)
    return true;

  /* Accept larl operands.  */
  if (TARGET_CPU_ZARCH
      && larl_operand (op, VOIDmode))
    return true;

  /* Accept floating-point zero operands that fit into a single GPR.  */
  if (GET_CODE (op) == CONST_DOUBLE
      && s390_float_const_zero_p (op)
      && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
    return true;

  /* Accept double-word operands that can be split.  */
  if (GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
    {
      machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
      rtx hi = operand_subword (op, 0, 0, dword_mode);
      rtx lo = operand_subword (op, 1, 0, dword_mode);
      return legitimate_reload_constant_p (hi)
	     && legitimate_reload_constant_p (lo);
    }

  /* Everything else cannot be handled without reload.  */
  return false;
}
/* Returns true if the constant value OP is a legitimate fp operand
   during and after reload.
   This function accepts all constants which can be loaded directly
   into an FPR.  */

static bool
legitimate_reload_fp_constant_p (rtx op)
{
  /* Accept floating-point zero operands if the load zero instruction
     can be used.  Prior to z196 the load fp zero instruction caused a
     performance penalty if the result is used as BFP number.  */
  if (TARGET_Z196
      && GET_CODE (op) == CONST_DOUBLE
      && s390_float_const_zero_p (op))
    return true;

  return false;
}
/* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
   return the class of reg to actually use.  */

static reg_class_t
s390_preferred_reload_class (rtx op, reg_class_t rclass)
{
  switch (GET_CODE (op))
    {
      /* Constants we cannot reload into general registers
	 must be forced into the literal pool.  */
      case CONST_DOUBLE:
      case CONST_INT:
	if (reg_class_subset_p (GENERAL_REGS, rclass)
	    && legitimate_reload_constant_p (op))
	  return GENERAL_REGS;
	else if (reg_class_subset_p (ADDR_REGS, rclass)
		 && legitimate_reload_constant_p (op))
	  return ADDR_REGS;
	else if (reg_class_subset_p (FP_REGS, rclass)
		 && legitimate_reload_fp_constant_p (op))
	  return FP_REGS;
	return NO_REGS;

      /* If a symbolic constant or a PLUS is reloaded,
	 it is most likely being used as an address, so
	 prefer ADDR_REGS.  If 'class' is not a superset
	 of ADDR_REGS, e.g. FP_REGS, reject this reload.  */
      case CONST:
	/* Symrefs cannot be pushed into the literal pool with -fPIC
	   so we *MUST NOT* return NO_REGS for these cases
	   (s390_cannot_force_const_mem will return true).

	   On the other hand we MUST return NO_REGS for symrefs with
	   invalid addend which might have been pushed to the literal
	   pool (no -fPIC).  Usually we would expect them to be
	   handled via secondary reload but this does not happen if
	   they are used as literal pool slot replacement in reload
	   inheritance (see emit_input_reload_insns).  */
	if (TARGET_CPU_ZARCH
	    && GET_CODE (XEXP (op, 0)) == PLUS
	    && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
	    && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
	  {
	    if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
	      return ADDR_REGS;
	    else
	      return NO_REGS;
	  }
	/* fallthrough */
      case SYMBOL_REF:
      case LABEL_REF:
	if (!legitimate_reload_constant_p (op))
	  return NO_REGS;
	/* fallthrough */
      case PLUS:
	/* load address will be used.  */
	if (reg_class_subset_p (ADDR_REGS, rclass))
	  return ADDR_REGS;
	else
	  return NO_REGS;

      default:
	break;
    }

  return rclass;
}
/* Return true if ADDR is SYMBOL_REF + addend with addend being a
   multiple of ALIGNMENT and the SYMBOL_REF being naturally
   aligned.  */

bool
s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
{
  HOST_WIDE_INT addend;
  rtx symref;

  if (!s390_loadrelative_operand_p (addr, &symref, &addend))
    return false;

  if (addend & (alignment - 1))
    return false;

  if (GET_CODE (symref) == SYMBOL_REF
      && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
    return true;

  if (GET_CODE (symref) == UNSPEC
      && alignment <= UNITS_PER_LONG)
    return true;

  return false;
}
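
/* Editor's illustration (not part of the original sources): the test
   "addend & (alignment - 1)" relies on ALIGNMENT being a power of two:

     addend = 6, alignment = 4:  6 & 3 == 2  -> not a multiple, reject
     addend = 8, alignment = 4:  8 & 3 == 0  -> multiple of 4, continue  */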
/* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
   operand SCRATCH is used to reload the even part of the address and
   the odd part is added afterwards.  */

void
s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
{
  HOST_WIDE_INT addend;
  rtx symref;

  if (!s390_loadrelative_operand_p (addr, &symref, &addend))
    gcc_unreachable ();

  if (!(addend & 1))
    /* Easy case.  The addend is even so larl will do fine.  */
    emit_move_insn (reg, addr);
  else
    {
      /* We can leave the scratch register untouched if the target
	 register is a valid base register.  */
      if (REGNO (reg) < FIRST_PSEUDO_REGISTER
	  && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
	scratch = reg;

      gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
      gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);

      if (addend != 1)
	emit_move_insn (scratch,
			gen_rtx_CONST (Pmode,
				       gen_rtx_PLUS (Pmode, symref,
						     GEN_INT (addend - 1))));
      else
	emit_move_insn (scratch, symref);

      /* Increment the address using la in order to avoid clobbering cc.  */
      s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
    }
}
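
/* Editor's illustration (not part of the original sources): LARL
   encodes its offset in halfwords, so only even addends are
   representable.  For sym + 5 the code above therefore emits the pair

     larl  %r1, sym+4       ; even part, here via the scratch register
     la    %r2, 1(%r1)      ; add the odd 1 without touching cc

   The register numbers are made up for the example.  */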
/* Generate what is necessary to move between REG and MEM using
   SCRATCH.  The direction is given by TOMEM.  */

void
s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
{
  /* Reload might have pulled a constant out of the literal pool.
     Force it back in.  */
  if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
      || GET_CODE (mem) == CONST)
    mem = force_const_mem (GET_MODE (reg), mem);

  gcc_assert (MEM_P (mem));

  /* For a load from memory we can leave the scratch register
     untouched if the target register is a valid base register.  */
  if (!tomem
      && REGNO (reg) < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
      && GET_MODE (reg) == GET_MODE (scratch))
    scratch = reg;

  /* Load address into scratch register.  Since we can't have a
     secondary reload for a secondary reload we have to cover the case
     where larl would need a secondary reload here as well.  */
  s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);

  /* Now we can use a standard load/store to do the move.  */
  if (tomem)
    emit_move_insn (replace_equiv_address (mem, scratch), reg);
  else
    emit_move_insn (reg, replace_equiv_address (mem, scratch));
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
		       machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Intermediate register needed.  */
  if (reg_classes_intersect_p (CC_REGS, rclass))
    return GENERAL_REGS;

  if (TARGET_Z10)
    {
      HOST_WIDE_INT offset;
      rtx symref;

      /* On z10 several optimizer steps may generate larl operands with
	 an odd addend.  */
      if (in_p
	  && s390_loadrelative_operand_p (x, &symref, &offset)
	  && mode == Pmode
	  && !SYMBOL_REF_ALIGN1_P (symref)
	  && (offset & 1) == 1)
	sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
		      : CODE_FOR_reloadsi_larl_odd_addend_z10);

      /* On z10 we need a scratch register when moving QI, TI or floating
	 point mode values from or to a memory location with a SYMBOL_REF
	 or if the symref addend of a SI or DI move is not aligned to the
	 width of the access.  */
      if (MEM_P (x)
	  && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
	  && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
	      || (!TARGET_ZARCH && mode == DImode)
	      || ((mode == HImode || mode == SImode || mode == DImode)
		  && (!s390_check_symref_alignment (XEXP (x, 0),
						    GET_MODE_SIZE (mode))))))
	{
#define __SECONDARY_RELOAD_CASE(M,m)					\
	  case M##mode:							\
	    if (TARGET_64BIT)						\
	      sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 :	\
				  CODE_FOR_reload##m##di_tomem_z10;	\
	    else							\
	      sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 :	\
				  CODE_FOR_reload##m##si_tomem_z10;	\
	    break;

	  switch (GET_MODE (x))
	    {
	      __SECONDARY_RELOAD_CASE (QI, qi);
	      __SECONDARY_RELOAD_CASE (HI, hi);
	      __SECONDARY_RELOAD_CASE (SI, si);
	      __SECONDARY_RELOAD_CASE (DI, di);
	      __SECONDARY_RELOAD_CASE (TI, ti);
	      __SECONDARY_RELOAD_CASE (SF, sf);
	      __SECONDARY_RELOAD_CASE (DF, df);
	      __SECONDARY_RELOAD_CASE (TF, tf);
	      __SECONDARY_RELOAD_CASE (SD, sd);
	      __SECONDARY_RELOAD_CASE (DD, dd);
	      __SECONDARY_RELOAD_CASE (TD, td);

	    default:
	      gcc_unreachable ();
	    }
#undef __SECONDARY_RELOAD_CASE
	}
    }

  /* We need a scratch register when loading a PLUS expression which
     is not a legitimate operand of the LOAD ADDRESS instruction.  */
  /* LRA can deal with transformation of plus op very well -- so we
     don't need to prompt LRA in this case.  */
  if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
    sri->icode = (TARGET_64BIT ?
		  CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);

  /* Performing a multiword move from or to memory we have to make sure the
     second chunk in memory is addressable without causing a displacement
     overflow.  If that would be the case we calculate the address in
     a scratch register.  */
  if (MEM_P (x)
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
			 + GET_MODE_SIZE (mode) - 1))
    {
      /* For GENERAL_REGS a displacement overflow is no problem if occurring
	 in a s_operand address since we may fallback to lm/stm.  So we only
	 have to care about overflows in the b+i+d case.  */
      if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
	   && s390_class_max_nregs (GENERAL_REGS, mode) > 1
	   && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
	  /* For FP_REGS no lm/stm is available so this check is triggered
	     for displacement overflows in b+i+d and b+d like addresses.  */
	  || (reg_classes_intersect_p (FP_REGS, rclass)
	      && s390_class_max_nregs (FP_REGS, mode) > 1))
	{
	  if (in_p)
	    sri->icode = (TARGET_64BIT ?
			  CODE_FOR_reloaddi_nonoffmem_in :
			  CODE_FOR_reloadsi_nonoffmem_in);
	  else
	    sri->icode = (TARGET_64BIT ?
			  CODE_FOR_reloaddi_nonoffmem_out :
			  CODE_FOR_reloadsi_nonoffmem_out);
	}
    }

  /* A scratch address register is needed when a symbolic constant is
     copied to r0 compiling with -fPIC.  In other cases the target
     register might be used as temporary (see legitimize_pic_address).  */
  if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
    sri->icode = (TARGET_64BIT ?
		  CODE_FOR_reloaddi_PIC_addr :
		  CODE_FOR_reloadsi_PIC_addr);

  /* Either scratch or no register needed.  */
  return NO_REGS;
}
/* Generate code to load SRC, which is PLUS that is not a
   legitimate operand for the LA instruction, into TARGET.
   SCRATCH may be used as scratch register.  */

void
s390_expand_plus_operand (rtx target, rtx src,
			  rtx scratch)
{
  rtx sum1, sum2;
  struct s390_address ad;

  /* src must be a PLUS; get its two operands.  */
  gcc_assert (GET_CODE (src) == PLUS);
  gcc_assert (GET_MODE (src) == Pmode);

  /* Check if any of the two operands is already scheduled
     for replacement by reload.  This can happen e.g. when
     float registers occur in an address.  */
  sum1 = find_replacement (&XEXP (src, 0));
  sum2 = find_replacement (&XEXP (src, 1));
  src = gen_rtx_PLUS (Pmode, sum1, sum2);

  /* If the address is already strictly valid, there's nothing to do.  */
  if (!s390_decompose_address (src, &ad)
      || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
      || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
    {
      /* Otherwise, one of the operands cannot be an address register;
	 we reload its value into the scratch register.  */
      if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
	{
	  emit_move_insn (scratch, sum1);
	  sum1 = scratch;
	}
      if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
	{
	  emit_move_insn (scratch, sum2);
	  sum2 = scratch;
	}

      /* According to the way these invalid addresses are generated
	 in reload.c, it should never happen (at least on s390) that
	 *neither* of the PLUS components, after find_replacements
	 was applied, is an address register.  */
      if (sum1 == scratch && sum2 == scratch)
	gcc_unreachable ();

      src = gen_rtx_PLUS (Pmode, sum1, sum2);
    }

  /* Emit the LOAD ADDRESS pattern.  Note that reload of PLUS
     is only ever performed on addresses, so we can mark the
     sum as legitimate for LA in any case.  */
  s390_load_address (target, src);
}
/* Return true if ADDR is a valid memory address.
   STRICT specifies whether strict register checking applies.  */

static bool
s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
{
  struct s390_address ad;

  if (TARGET_Z10
      && larl_operand (addr, VOIDmode)
      && (mode == VOIDmode
	  || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
    return true;

  if (!s390_decompose_address (addr, &ad))
    return false;

  if (strict)
    {
      if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
	return false;

      if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
	return false;
    }
  else
    {
      if (ad.base
	  && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
	       || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
	return false;

      if (ad.indx
	  && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
	       || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
	return false;
    }
  return true;
}
/* Return true if OP is a valid operand for the LA instruction.
   In 31-bit, we need to prove that the result is used as an
   address, as LA performs only a 31-bit addition.  */

bool
legitimate_la_operand_p (rtx op)
{
  struct s390_address addr;
  if (!s390_decompose_address (op, &addr))
    return false;

  return (TARGET_64BIT || addr.pointer);
}
/* Return true if it is valid *and* preferable to use LA to
   compute the sum of OP1 and OP2.  */

bool
preferred_la_operand_p (rtx op1, rtx op2)
{
  struct s390_address addr;

  if (op2 != const0_rtx)
    op1 = gen_rtx_PLUS (Pmode, op1, op2);

  if (!s390_decompose_address (op1, &addr))
    return false;
  if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
    return false;
  if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
    return false;

  /* Avoid LA instructions with index register on z196; it is
     preferable to use regular add instructions when possible.
     Starting with zEC12 the la with index register is "uncracked"
     again.  */
  if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
    return false;

  if (!TARGET_64BIT && !addr.pointer)
    return false;

  if (addr.pointer)
    return true;

  if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
      || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
    return true;

  return false;
}
/* Emit a forced load-address operation to load SRC into DST.
   This will use the LOAD ADDRESS instruction even in situations
   where legitimate_la_operand_p (SRC) returns false.  */

void
s390_load_address (rtx dst, rtx src)
{
  if (TARGET_64BIT)
    emit_move_insn (dst, src);
  else
    emit_insn (gen_force_la_31 (dst, src));
}
/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is
      returned.

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_FLAG_LOCAL set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
   reg also appears in the address.  */

rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  rtx addr = orig;
  rtx addend = const0_rtx;
  rtx new_rtx = orig;

  gcc_assert (!TLS_SYMBOLIC_CONST (addr));

  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)
    {
      addend = XEXP (addr, 1);
      addr = XEXP (addr, 0);
    }

  if ((GET_CODE (addr) == LABEL_REF
       || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
       || (GET_CODE (addr) == UNSPEC &&
	   (XINT (addr, 1) == UNSPEC_GOTENT
	    || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
      && GET_CODE (addend) == CONST_INT)
    {
      /* This can be locally addressed.  */

      /* larl_operand requires UNSPECs to be wrapped in a const rtx.  */
      rtx const_addr = (GET_CODE (addr) == UNSPEC ?
			gen_rtx_CONST (Pmode, addr) : addr);

      if (TARGET_CPU_ZARCH
	  && larl_operand (const_addr, VOIDmode)
	  && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
	  && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
	{
	  if (INTVAL (addend) & 1)
	    {
	      /* LARL can't handle odd offsets, so emit a pair of LARL
		 and LA.  */
	      rtx temp = reg? reg : gen_reg_rtx (Pmode);

	      if (!DISP_IN_RANGE (INTVAL (addend)))
		{
		  HOST_WIDE_INT even = INTVAL (addend) - 1;
		  addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
		  addr = gen_rtx_CONST (Pmode, addr);
		  addend = const1_rtx;
		}

	      emit_move_insn (temp, addr);
	      new_rtx = gen_rtx_PLUS (Pmode, temp, addend);

	      if (reg != 0)
		{
		  s390_load_address (reg, new_rtx);
		  new_rtx = reg;
		}
	    }
	  else
	    {
	      /* If the offset is even, we can just use LARL.  This
		 will happen automatically.  */
	    }
	}
      else
	{
	  /* No larl - Access local symbols relative to the GOT.  */

	  rtx temp = reg? reg : gen_reg_rtx (Pmode);

	  if (reload_in_progress || reload_completed)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	  addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
	  if (addend != const0_rtx)
	    addr = gen_rtx_PLUS (Pmode, addr, addend);
	  addr = gen_rtx_CONST (Pmode, addr);
	  addr = force_const_mem (Pmode, addr);
	  emit_move_insn (temp, addr);

	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
	  if (reg != 0)
	    {
	      s390_load_address (reg, new_rtx);
	      new_rtx = reg;
	    }
	}
    }
  else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
    {
      /* A non-local symbol reference without addend.

	 The symbol ref is wrapped into an UNSPEC to make sure the
	 proper operand modifier (@GOT or @GOTENT) will be emitted.
	 This will tell the linker to put the symbol into the GOT.

	 Additionally the code dereferencing the GOT slot is emitted here.

	 An addend to the symref needs to be added afterwards.
	 legitimize_pic_address calls itself recursively to handle
	 that case.  So no need to do it here.  */

      if (reg == 0)
	reg = gen_reg_rtx (Pmode);

      if (TARGET_Z10)
	{
	  /* Use load relative if possible.
	     lgrl <target>, sym@GOTENT  */
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);

	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
      else if (flag_pic == 1)
	{
	  /* Assume GOT offset is a valid displacement operand (< 4k
	     or < 512k with z990).  This is handled the same way in
	     both 31- and 64-bit code (@GOT).
	     lg <target>, sym@GOT(r12)  */

	  if (reload_in_progress || reload_completed)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
      else if (TARGET_CPU_ZARCH)
	{
	  /* If the GOT offset might be >= 4k, we determine the position
	     of the GOT entry via a PC-relative LARL (@GOTENT).
	     larl temp, sym@GOTENT
	     lg   <target>, 0(temp)  */

	  rtx temp = reg ? reg : gen_reg_rtx (Pmode);

	  gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
		      || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);

	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  emit_move_insn (temp, new_rtx);

	  new_rtx = gen_const_mem (Pmode, temp);
	  emit_move_insn (reg, new_rtx);

	  new_rtx = reg;
	}
      else
	{
	  /* If the GOT offset might be >= 4k, we have to load it
	     from the literal pool (@GOT).

	     lg temp, lit-litbase(r13)
	     lg <target>, 0(temp)
	     lit: .long sym@GOT  */

	  rtx temp = reg ? reg : gen_reg_rtx (Pmode);

	  gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
		      || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);

	  if (reload_in_progress || reload_completed)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	  addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
	  addr = gen_rtx_CONST (Pmode, addr);
	  addr = force_const_mem (Pmode, addr);
	  emit_move_insn (temp, addr);

	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
  else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
    {
      gcc_assert (XVECLEN (addr, 0) == 1);
      switch (XINT (addr, 1))
	{
	  /* These UNSPECs address symbols (or PLT slots) relative to
	     the GOT (not GOT slots!).  In general this will exceed the
	     displacement range, so these values belong in the literal
	     pool.  */
	case UNSPEC_GOTOFF:
	case UNSPEC_PLTOFF:
	  new_rtx = force_const_mem (Pmode, orig);
	  break;

	  /* For -fPIC the GOT size might exceed the displacement
	     range so make sure the value is in the literal pool.  */
	case UNSPEC_GOT:
	  if (flag_pic == 2)
	    new_rtx = force_const_mem (Pmode, orig);
	  break;

	  /* For @GOTENT larl is used.  This is handled like local
	     symbol refs.  */
	case UNSPEC_GOTENT:
	  gcc_unreachable ();
	  break;

	  /* @PLT is OK as is on 64-bit, must be converted to
	     GOT-relative @PLTOFF on 31-bit.  */
	case UNSPEC_PLT:
	  if (!TARGET_CPU_ZARCH)
	    {
	      rtx temp = reg? reg : gen_reg_rtx (Pmode);

	      if (reload_in_progress || reload_completed)
		df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	      addr = XVECEXP (addr, 0, 0);
	      addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
				     UNSPEC_PLTOFF);
	      if (addend != const0_rtx)
		addr = gen_rtx_PLUS (Pmode, addr, addend);
	      addr = gen_rtx_CONST (Pmode, addr);
	      addr = force_const_mem (Pmode, addr);
	      emit_move_insn (temp, addr);

	      new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
	      if (reg != 0)
		{
		  s390_load_address (reg, new_rtx);
		  new_rtx = reg;
		}
	    }
	  else
	    /* On 64 bit larl can be used.  This case is handled like
	       local symbol refs.  */
	    gcc_unreachable ();
	  break;

	  /* Everything else cannot happen.  */
	default:
	  gcc_unreachable ();
	}
    }
  else if (addend != const0_rtx)
    {
      /* Otherwise, compute the sum.  */

      rtx base = legitimize_pic_address (addr, reg);
      new_rtx  = legitimize_pic_address (addend,
					 base == reg ? NULL_RTX : reg);
      if (GET_CODE (new_rtx) == CONST_INT)
	new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
      else
	{
	  if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
	    {
	      base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
	      new_rtx = XEXP (new_rtx, 1);
	    }
	  new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
	}

      if (GET_CODE (new_rtx) == CONST)
	new_rtx = XEXP (new_rtx, 0);
      new_rtx = force_operand (new_rtx, 0);
    }

  return new_rtx;
}
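
/* Editor's illustration (not part of the original sources): the typical
   code for a global symbol under -fPIC on z10 or newer is the
   load-relative form

     lgrl  %r1, sym@GOTENT        ; address loaded straight from the GOT

   while flag_pic == 1 on older CPUs goes through the PIC register

     lg    %r1, sym@GOT(%r12)     ; GOT offset assumed to fit the displacement

   mirroring the branch comments above; register numbers are made up.  */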
/* Load the thread pointer into a register.  */

rtx
s390_get_thread_pointer (void)
{
  rtx tp = gen_reg_rtx (Pmode);

  emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
  mark_reg_pointer (tp, BITS_PER_WORD);

  return tp;
}
/* Emit a tls call insn.  The call target is the SYMBOL_REF stored
   in s390_tls_symbol which always refers to __tls_get_offset.
   The returned offset is written to RESULT_REG and an USE rtx is
   generated for TLS_CALL.  */

static GTY(()) rtx s390_tls_symbol;

static void
s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
{
  rtx insn;

  if (!flag_pic)
    emit_insn (s390_load_got ());

  if (!s390_tls_symbol)
    s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");

  insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
			 gen_rtx_REG (Pmode, RETURN_REGNUM));

  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
  RTL_CONST_CALL_P (insn) = 1;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  REG may be used as temporary.  */

static rtx
legitimize_tls_address (rtx addr, rtx reg)
{
  rtx new_rtx, tls_call, temp, base, r2, insn;

  if (GET_CODE (addr) == SYMBOL_REF)
    switch (tls_symbolic_operand (addr))
      {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	start_sequence ();
	r2 = gen_rtx_REG (Pmode, 2);
	tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
	new_rtx = gen_rtx_CONST (Pmode, tls_call);
	new_rtx = force_const_mem (Pmode, new_rtx);
	emit_move_insn (r2, new_rtx);
	s390_emit_tls_call_insn (r2, tls_call);
	insn = get_insns ();
	end_sequence ();

	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
	temp = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, temp, r2, new_rtx);

	new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
	if (reg != 0)
	  {
	    s390_load_address (reg, new_rtx);
	    new_rtx = reg;
	  }
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	start_sequence ();
	r2 = gen_rtx_REG (Pmode, 2);
	tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
	new_rtx = gen_rtx_CONST (Pmode, tls_call);
	new_rtx = force_const_mem (Pmode, new_rtx);
	emit_move_insn (r2, new_rtx);
	s390_emit_tls_call_insn (r2, tls_call);
	insn = get_insns ();
	end_sequence ();

	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
	temp = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, temp, r2, new_rtx);

	new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
	base = gen_reg_rtx (Pmode);
	s390_load_address (base, new_rtx);

	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
	new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	new_rtx = force_const_mem (Pmode, new_rtx);
	temp = gen_reg_rtx (Pmode);
	emit_move_insn (temp, new_rtx);

	new_rtx = gen_rtx_PLUS (Pmode, base, temp);
	if (reg != 0)
	  {
	    s390_load_address (reg, new_rtx);
	    new_rtx = reg;
	  }
	break;

      case TLS_MODEL_INITIAL_EXEC:
	if (flag_pic == 1)
	  {
	    /* Assume GOT offset < 4k.  This is handled the same way
	       in both 31- and 64-bit code.  */

	    if (reload_in_progress || reload_completed)
	      df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	    new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
	    new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	    new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
	    new_rtx = gen_const_mem (Pmode, new_rtx);
	    temp = gen_reg_rtx (Pmode);
	    emit_move_insn (temp, new_rtx);
	  }
	else if (TARGET_CPU_ZARCH)
	  {
	    /* If the GOT offset might be >= 4k, we determine the position
	       of the GOT entry via a PC-relative LARL.  */

	    new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
	    new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	    temp = gen_reg_rtx (Pmode);
	    emit_move_insn (temp, new_rtx);

	    new_rtx = gen_const_mem (Pmode, temp);
	    temp = gen_reg_rtx (Pmode);
	    emit_move_insn (temp, new_rtx);
	  }
	else if (flag_pic)
	  {
	    /* If the GOT offset might be >= 4k, we have to load it
	       from the literal pool.  */

	    if (reload_in_progress || reload_completed)
	      df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	    new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
	    new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	    new_rtx = force_const_mem (Pmode, new_rtx);
	    temp = gen_reg_rtx (Pmode);
	    emit_move_insn (temp, new_rtx);

	    new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
	    new_rtx = gen_const_mem (Pmode, new_rtx);

	    new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
	    temp = gen_reg_rtx (Pmode);
	    emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
	  }
	else
	  {
	    /* In position-dependent code, load the absolute address of
	       the GOT entry from the literal pool.  */

	    new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
	    new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	    new_rtx = force_const_mem (Pmode, new_rtx);
	    temp = gen_reg_rtx (Pmode);
	    emit_move_insn (temp, new_rtx);

	    new_rtx = temp;
	    new_rtx = gen_const_mem (Pmode, new_rtx);
	    new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
	    temp = gen_reg_rtx (Pmode);
	    emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
	  }

	new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
	if (reg != 0)
	  {
	    s390_load_address (reg, new_rtx);
	    new_rtx = reg;
	  }
	break;

      case TLS_MODEL_LOCAL_EXEC:
	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
	new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	new_rtx = force_const_mem (Pmode, new_rtx);
	temp = gen_reg_rtx (Pmode);
	emit_move_insn (temp, new_rtx);

	new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
	if (reg != 0)
	  {
	    s390_load_address (reg, new_rtx);
	    new_rtx = reg;
	  }
	break;

      default:
	gcc_unreachable ();
      }

  else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
    {
      switch (XINT (XEXP (addr, 0), 1))
	{
	case UNSPEC_INDNTPOFF:
	  gcc_assert (TARGET_CPU_ZARCH);
	  new_rtx = addr;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      new_rtx = XEXP (XEXP (addr, 0), 0);
      if (GET_CODE (new_rtx) != SYMBOL_REF)
	new_rtx = gen_rtx_CONST (Pmode, new_rtx);

      new_rtx = legitimize_tls_address (new_rtx, reg);
      new_rtx = plus_constant (Pmode, new_rtx,
			       INTVAL (XEXP (XEXP (addr, 0), 1)));
      new_rtx = force_operand (new_rtx, 0);
    }

  else
    gcc_unreachable ();  /* for now ... */

  return new_rtx;
}
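
/* Editor's illustration (not part of the original sources): all four
   TLS models above compute "thread pointer + offset"; they differ only
   in how the offset is obtained -- via __tls_get_offset calls for the
   two dynamic models, via a GOT slot for initial-exec, and as a
   link-time constant (@NTPOFF) for local-exec.  E.g. for

     __thread int counter;
     int get (void) { return counter; }

   the local-exec model resolves counter to TP + counter@NTPOFF with no
   runtime call at all.  */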
/* Emit insns making the address in operands[1] valid for a standard
   move to operands[0].  operands[1] is replaced by an address which
   should be used instead of the former RTX to emit the move
   pattern.  */

void
emit_symbolic_move (rtx *operands)
{
  rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (Pmode, operands[1]);
  else if (TLS_SYMBOLIC_CONST (operands[1]))
    operands[1] = legitimize_tls_address (operands[1], temp);
  else if (flag_pic)
    operands[1] = legitimize_pic_address (operands[1], temp);
}
/* Try machine-dependent ways of modifying an illegitimate address X
   to be legitimate.  If we find one, return the new, valid address.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE is the mode of the operand pointed to by X.

   When -fpic is used, special handling is needed for symbolic references.
   See comments by legitimize_pic_address for details.  */

static rtx
s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx constant_term = const0_rtx;

  if (TLS_SYMBOLIC_CONST (x))
    {
      x = legitimize_tls_address (x, 0);

      if (s390_legitimate_address_p (mode, x, FALSE))
	return x;
    }
  else if (GET_CODE (x) == PLUS
	   && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
	       || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
    {
      return x;
    }
  else if (flag_pic)
    {
      if (SYMBOLIC_CONST (x)
	  || (GET_CODE (x) == PLUS
	      && (SYMBOLIC_CONST (XEXP (x, 0))
		  || SYMBOLIC_CONST (XEXP (x, 1)))))
	x = legitimize_pic_address (x, 0);

      if (s390_legitimate_address_p (mode, x, FALSE))
	return x;
    }

  x = eliminate_constant_term (x, &constant_term);

  /* Optimize loading of large displacements by splitting them
     into the multiple of 4K and the rest; this allows the
     former to be CSE'd if possible.

     Don't do this if the displacement is added to a register
     pointing into the stack frame, as the offsets will
     change later anyway.  */

  if (GET_CODE (constant_term) == CONST_INT
      && !TARGET_LONG_DISPLACEMENT
      && !DISP_IN_RANGE (INTVAL (constant_term))
      && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
    {
      HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
      HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;

      rtx temp = gen_reg_rtx (Pmode);
      rtx val  = force_operand (GEN_INT (upper), temp);
      if (val != temp)
	emit_move_insn (temp, val);

      x = gen_rtx_PLUS (Pmode, x, temp);
      constant_term = GEN_INT (lower);
    }

  if (GET_CODE (x) == PLUS)
    {
      if (GET_CODE (XEXP (x, 0)) == REG)
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val  = force_operand (XEXP (x, 1), temp);
	  if (val != temp)
	    emit_move_insn (temp, val);

	  x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
	}

      else if (GET_CODE (XEXP (x, 1)) == REG)
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val  = force_operand (XEXP (x, 0), temp);
	  if (val != temp)
	    emit_move_insn (temp, val);

	  x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
	}
    }

  if (constant_term != const0_rtx)
    x = gen_rtx_PLUS (Pmode, x, constant_term);

  return x;
}
/* Try a machine-dependent way of reloading an illegitimate address AD
   operand.  If we find one, push the reload and return the new address.

   MODE is the mode of the enclosing MEM.  OPNUM is the operand number
   and TYPE is the reload type of the current reload.  */

rtx
legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
			   int opnum, int type)
{
  if (!optimize || TARGET_LONG_DISPLACEMENT)
    return NULL_RTX;

  if (GET_CODE (ad) == PLUS)
    {
      rtx tem = simplify_binary_operation (PLUS, Pmode,
					   XEXP (ad, 0), XEXP (ad, 1));
      if (tem)
	ad = tem;
    }

  if (GET_CODE (ad) == PLUS
      && GET_CODE (XEXP (ad, 0)) == REG
      && GET_CODE (XEXP (ad, 1)) == CONST_INT
      && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
    {
      HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
      HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
      rtx cst, tem, new_rtx;

      cst = GEN_INT (upper);
      if (!legitimate_reload_constant_p (cst))
	cst = force_const_mem (Pmode, cst);

      tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
      new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));

      push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return new_rtx;
    }

  return NULL_RTX;
}
/* Emit code to move LEN bytes from SRC to DST.  */

void
s390_expand_movmem (rtx dst, rtx src, rtx len)
{
  /* When tuning for z10 or higher we rely on the Glibc functions to
     do the right thing.  Only for constant lengths below 64k we will
     generate inline code.  */
  if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
    return;

  if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
    {
      if (INTVAL (len) > 0)
	emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
    }

  else if (TARGET_MVCLE)
    {
      emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
    }

  else
    {
      rtx dst_addr, src_addr, count, blocks, temp;
      rtx_code_label *loop_start_label = gen_label_rtx ();
      rtx_code_label *loop_end_label = gen_label_rtx ();
      rtx_code_label *end_label = gen_label_rtx ();
      machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
	mode = Pmode;

      dst_addr = gen_reg_rtx (Pmode);
      src_addr = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
			       EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
      emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
      dst = change_address (dst, VOIDmode, dst_addr);
      src = change_address (src, VOIDmode, src_addr);

      temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
			   OPTAB_DIRECT);
      if (temp != count)
	emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
			   OPTAB_DIRECT);
      if (temp != blocks)
	emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
			       EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
	  && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
	{
	  rtx prefetch;

	  /* Issue a read prefetch for the +3 cache line.  */
	  prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
				   const0_rtx, const0_rtx);
	  PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
	  emit_insn (prefetch);

	  /* Issue a write prefetch for the +3 cache line.  */
	  prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
				   const1_rtx, const0_rtx);
	  PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
	  emit_insn (prefetch);
	}

      emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
      s390_load_address (dst_addr,
			 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
      s390_load_address (src_addr,
			 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
			   OPTAB_DIRECT);
      if (temp != blocks)
	emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
			       EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      emit_insn (gen_movmem_short (dst, src,
				   convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);
    }
}
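
/* Editor's illustration (not part of the original sources): the loop is
   byte-exact because MVC's length field encodes "bytes - 1", so
   GEN_INT (255) moves a full 256-byte block.  A standalone C model of
   the chunking (names made up):

     #include <string.h>
     static void model (char *dst, const char *src, unsigned long len)
     {
       unsigned long count = len - 1;          // MVC length encoding
       unsigned long blocks = count >> 8;      // full 256-byte blocks
       while (blocks--)
         { memcpy (dst, src, 256); dst += 256; src += 256; }
       memcpy (dst, src, (count & 0xff) + 1);  // the remainder
     }

   assuming len > 0, which the emitted code checks up front.  */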
/* Emit code to set LEN bytes at DST to VAL.
   Make use of clrmem if VAL is zero.  */

void
s390_expand_setmem (rtx dst, rtx len, rtx val)
{
  if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
    return;

  gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);

  if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
    {
      if (val == const0_rtx && INTVAL (len) <= 256)
	emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
      else
	{
	  /* Initialize memory by storing the first byte.  */
	  emit_move_insn (adjust_address (dst, QImode, 0), val);

	  if (INTVAL (len) > 1)
	    {
	      /* Initiate 1 byte overlap move.
		 The first byte of DST is propagated through DSTP1.
		 Prepare a movmem for:  DST+1 = DST (length = LEN - 1).
		 DST is set to size 1 so the rest of the memory location
		 does not count as source operand.  */
	      rtx dstp1 = adjust_address (dst, VOIDmode, 1);
	      set_mem_size (dst, 1);

	      emit_insn (gen_movmem_short (dstp1, dst,
					   GEN_INT (INTVAL (len) - 2)));
	    }
	}
    }

  else if (TARGET_MVCLE)
    {
      val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
      emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
    }

  else
    {
      rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
      rtx_code_label *loop_start_label = gen_label_rtx ();
      rtx_code_label *loop_end_label = gen_label_rtx ();
      rtx_code_label *end_label = gen_label_rtx ();
      machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
	mode = Pmode;

      dst_addr = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
			       EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
      dst = change_address (dst, VOIDmode, dst_addr);

      if (val == const0_rtx)
	temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
			     OPTAB_DIRECT);
      else
	{
	  dstp1 = adjust_address (dst, VOIDmode, 1);
	  set_mem_size (dst, 1);

	  /* Initialize memory by storing the first byte.  */
	  emit_move_insn (adjust_address (dst, QImode, 0), val);

	  /* If count is 1 we are done.  */
	  emit_cmp_and_jump_insns (count, const1_rtx,
				   EQ, NULL_RTX, mode, 1, end_label);

	  temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
			       OPTAB_DIRECT);
	}
      if (temp != count)
	emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
			   OPTAB_DIRECT);
      if (temp != blocks)
	emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
			       EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
	  && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
	{
	  /* Issue a write prefetch for the +4 cache line.  */
	  rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
						     GEN_INT (1024)),
				       const1_rtx, const0_rtx);
	  emit_insn (prefetch);
	  PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
	}

      if (val == const0_rtx)
	emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
      else
	emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
      s390_load_address (dst_addr,
			 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
			   OPTAB_DIRECT);
      if (temp != blocks)
	emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
			       EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      if (val == const0_rtx)
	emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
      else
	emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);
    }
}
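
/* Editor's illustration (not part of the original sources): the
   "1 byte overlap move" is the classic memset-via-MVC idiom.  Because
   MVC copies left to right one byte at a time, copying DST..DST+n-1 to
   DST+1..DST+n after storing the first byte propagates that byte
   through the whole range:

     dst[0] = val;
     for (i = 0; i < n; i++)   // what MVC dst+1(n),dst effectively does
       dst[i + 1] = dst[i];    // every byte ends up equal to val
*/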
/* Emit code to compare LEN bytes at OP0 with those at OP1,
   and return the result in TARGET.  */

void
s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
{
  rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
  rtx tmp;

  /* When tuning for z10 or higher we rely on the Glibc functions to
     do the right thing.  Only for constant lengths below 64k we will
     generate inline code.  */
  if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
    return;

  /* As the result of CMPINT is inverted compared to what we need,
     we have to swap the operands.  */
  tmp = op0; op0 = op1; op1 = tmp;

  if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
    {
      if (INTVAL (len) > 0)
	{
	  emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
	  emit_insn (gen_cmpint (target, ccreg));
	}
      else
	emit_move_insn (target, const0_rtx);
    }
  else if (TARGET_MVCLE)
    {
      emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
      emit_insn (gen_cmpint (target, ccreg));
    }
  else
    {
      rtx addr0, addr1, count, blocks, temp;
      rtx_code_label *loop_start_label = gen_label_rtx ();
      rtx_code_label *loop_end_label = gen_label_rtx ();
      rtx_code_label *end_label = gen_label_rtx ();
      machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
	mode = Pmode;

      addr0 = gen_reg_rtx (Pmode);
      addr1 = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
			       EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
      emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
      op0 = change_address (op0, VOIDmode, addr0);
      op1 = change_address (op1, VOIDmode, addr1);

      temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
			   OPTAB_DIRECT);
      if (temp != count)
	emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
			   OPTAB_DIRECT);
      if (temp != blocks)
	emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
			       EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
	  && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
	{
	  rtx prefetch;

	  /* Issue a read prefetch for the +2 cache line of operand 1.  */
	  prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
				   const0_rtx, const0_rtx);
	  emit_insn (prefetch);
	  PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;

	  /* Issue a read prefetch for the +2 cache line of operand 2.  */
	  prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
				   const0_rtx, const0_rtx);
	  emit_insn (prefetch);
	  PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
	}

      emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
      temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
      temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
			gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
      temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
      emit_jump_insn (temp);

      s390_load_address (addr0,
			 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
      s390_load_address (addr1,
			 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
			   OPTAB_DIRECT);
      if (temp != blocks)
	emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
			       EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      emit_insn (gen_cmpmem_short (op0, op1,
				   convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);

      emit_insn (gen_cmpint (target, ccreg));
    }
}
4458 /* Expand conditional increment or decrement using alc/slb instructions.
4459 Should generate code setting DST to either SRC or SRC + INCREMENT,
4460 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4461 Returns true if successful, false otherwise.
4463 That makes it possible to implement some if-constructs without jumps e.g.:
4464 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4465 unsigned int a, b, c;
4466 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4467 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4468 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4469 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4471 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4472 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4473 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4474 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4475 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
bool
s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
                   rtx dst, rtx src, rtx increment)
{
  machine_mode cmp_mode;
  machine_mode cc_mode;
  rtx op_res;
  rtx insn;
  rtvec p;
  int ret;

  if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
      && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
    cmp_mode = SImode;
  else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
           && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
    cmp_mode = DImode;
  else
    return false;

  /* Try ADD LOGICAL WITH CARRY.  */
  if (increment == const1_rtx)
    {
      /* Determine CC mode to use.  */
      if (cmp_code == EQ || cmp_code == NE)
        {
          if (cmp_op1 != const0_rtx)
            {
              cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
                                             NULL_RTX, 0, OPTAB_WIDEN);
              cmp_op1 = const0_rtx;
            }

          cmp_code = cmp_code == EQ ? LEU : GTU;
        }

      if (cmp_code == LTU || cmp_code == LEU)
        {
          rtx tem = cmp_op0;
          cmp_op0 = cmp_op1;
          cmp_op1 = tem;
          cmp_code = swap_condition (cmp_code);
        }

      switch (cmp_code)
        {
        case GTU:
          cc_mode = CCUmode;
          break;

        case GEU:
          cc_mode = CCL3mode;
          break;

        default:
          return false;
        }

      /* Emit comparison instruction pattern. */
      if (!register_operand (cmp_op0, cmp_mode))
        cmp_op0 = force_reg (cmp_mode, cmp_op0);

      insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
                          gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
      /* We use insn_invalid_p here to add clobbers if required. */
      ret = insn_invalid_p (emit_insn (insn), false);
      gcc_assert (!ret);

      /* Emit ALC instruction pattern. */
      op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
                               gen_rtx_REG (cc_mode, CC_REGNUM),
                               const0_rtx);

      if (src != const0_rtx)
        {
          if (!register_operand (src, GET_MODE (dst)))
            src = force_reg (GET_MODE (dst), src);

          op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
          op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
        }

      p = rtvec_alloc (2);
      RTVEC_ELT (p, 0) =
        gen_rtx_SET (VOIDmode, dst, op_res);
      RTVEC_ELT (p, 1) =
        gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      return true;
    }

  /* Try SUBTRACT LOGICAL WITH BORROW.  */
  if (increment == constm1_rtx)
    {
      /* Determine CC mode to use.  */
      if (cmp_code == EQ || cmp_code == NE)
        {
          if (cmp_op1 != const0_rtx)
            {
              cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
                                             NULL_RTX, 0, OPTAB_WIDEN);
              cmp_op1 = const0_rtx;
            }

          cmp_code = cmp_code == EQ ? LEU : GTU;
        }

      if (cmp_code == GTU || cmp_code == GEU)
        {
          rtx tem = cmp_op0;
          cmp_op0 = cmp_op1;
          cmp_op1 = tem;
          cmp_code = swap_condition (cmp_code);
        }

      switch (cmp_code)
        {
        case LEU:
          cc_mode = CCUmode;
          break;

        case LTU:
          cc_mode = CCL3mode;
          break;

        default:
          return false;
        }

      /* Emit comparison instruction pattern. */
      if (!register_operand (cmp_op0, cmp_mode))
        cmp_op0 = force_reg (cmp_mode, cmp_op0);

      insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
                          gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
      /* We use insn_invalid_p here to add clobbers if required. */
      ret = insn_invalid_p (emit_insn (insn), false);
      gcc_assert (!ret);

      /* Emit SLB instruction pattern. */
      if (!register_operand (src, GET_MODE (dst)))
        src = force_reg (GET_MODE (dst), src);

      op_res = gen_rtx_MINUS (GET_MODE (dst),
                              gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
                              gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
                                              gen_rtx_REG (cc_mode, CC_REGNUM),
                                              const0_rtx));
      p = rtvec_alloc (2);
      RTVEC_ELT (p, 0) =
        gen_rtx_SET (VOIDmode, dst, op_res);
      RTVEC_ELT (p, 1) =
        gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      return true;
    }

  return false;
}
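
/* Illustrative sketch (editorial addition, never compiled): at the C level,
   the ALC/SLB sequences built above compute the branch-free equivalents
   listed in the comment before this function.  The helper name is
   hypothetical.  */
#if 0 /* example only */
static unsigned int
s390_addcc_example (unsigned int a, unsigned int b, unsigned int c)
{
  /* "if (a < b) c++;": CCU b > a sets the carry (CC2); ALC then adds
     that carry to c, so no branch is needed.  */
  unsigned int carry = (b > a);
  return c + carry;
}
#endif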
/* Expand code for the insv template. Return true if successful.  */

bool
s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
{
  int bitsize = INTVAL (op1);
  int bitpos = INTVAL (op2);
  machine_mode mode = GET_MODE (dest);
  machine_mode smode;
  int smode_bsize, mode_bsize;
  rtx op, clobber;

  if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
    return false;

  /* Generate INSERT IMMEDIATE (IILL et al).  */
  /* (set (ze (reg)) (const_int)).  */
  if (TARGET_ZARCH
      && register_operand (dest, word_mode)
      && (bitpos % 16) == 0
      && (bitsize % 16) == 0
      && const_int_operand (src, VOIDmode))
    {
      HOST_WIDE_INT val = INTVAL (src);
      int regpos = bitpos + bitsize;

      while (regpos > bitpos)
        {
          machine_mode putmode;
          int putsize;

          if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
            putmode = SImode;
          else
            putmode = HImode;

          putsize = GET_MODE_BITSIZE (putmode);
          regpos -= putsize;
          emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
                                                GEN_INT (putsize),
                                                GEN_INT (regpos)),
                          gen_int_mode (val, putmode));
          val >>= putsize;
        }
      gcc_assert (regpos == bitpos);
      return true;
    }

  smode = smallest_mode_for_size (bitsize, MODE_INT);
  smode_bsize = GET_MODE_BITSIZE (smode);
  mode_bsize = GET_MODE_BITSIZE (mode);

  /* Generate STORE CHARACTERS UNDER MASK (STCM et al).  */
  if (bitpos == 0
      && (bitsize % BITS_PER_UNIT) == 0
      && MEM_P (dest)
      && (register_operand (src, word_mode)
          || const_int_operand (src, VOIDmode)))
    {
      /* Emit standard pattern if possible.  */
      if (smode_bsize == bitsize)
        {
          emit_move_insn (adjust_address (dest, smode, 0),
                          gen_lowpart (smode, src));
          return true;
        }

      /* (set (ze (mem)) (const_int)).  */
      else if (const_int_operand (src, VOIDmode))
        {
          int size = bitsize / BITS_PER_UNIT;
          rtx src_mem = adjust_address (force_const_mem (word_mode, src),
                                        BLKmode,
                                        UNITS_PER_WORD - size);

          dest = adjust_address (dest, BLKmode, 0);
          set_mem_size (dest, size);
          s390_expand_movmem (dest, src_mem, GEN_INT (size));
          return true;
        }

      /* (set (ze (mem)) (reg)).  */
      else if (register_operand (src, word_mode))
        {
          if (bitsize <= 32)
            emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
                                                  const0_rtx), src);
          else
            {
              /* Emit st,stcmh sequence.  */
              int stcmh_width = bitsize - 32;
              int size = stcmh_width / BITS_PER_UNIT;

              emit_move_insn (adjust_address (dest, SImode, size),
                              gen_lowpart (SImode, src));
              set_mem_size (dest, size);
              emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
                                                    GEN_INT (stcmh_width),
                                                    const0_rtx),
                              gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
            }
          return true;
        }

      return false;
    }

  /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al).  */
  if ((bitpos % BITS_PER_UNIT) == 0
      && (bitsize % BITS_PER_UNIT) == 0
      && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
      && MEM_P (src)
      && (mode == DImode || mode == SImode)
      && register_operand (dest, mode))
    {
      /* Emit a strict_low_part pattern if possible.  */
      if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
        {
          op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
          op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
          clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
          emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
          return true;
        }

      /* ??? There are more powerful versions of ICM that are not
         completely represented in the md file.  */
    }

  /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al).  */
  if (TARGET_Z10 && (mode == DImode || mode == SImode))
    {
      machine_mode mode_s = GET_MODE (src);

      if (mode_s == VOIDmode)
        {
          /* Assume const_int etc already in the proper mode.  */
          src = force_reg (mode, src);
        }
      else if (mode_s != mode)
        {
          gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
          src = force_reg (mode_s, src);
          src = gen_lowpart (mode, src);
        }

      op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
      op = gen_rtx_SET (VOIDmode, op, src);

      if (!TARGET_ZEC12)
        {
          clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
          op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
        }
      emit_insn (op);
      return true;
    }

  return false;
}
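
/* Illustrative sketch (editorial addition, never compiled): the
   (set (zero_extract ...)) operation implemented above, written as
   portable shift-and-mask C.  Bit positions count from the MSB as in
   the zSeries bit numbering; the helper name is hypothetical.  */
#if 0 /* example only */
#include <stdint.h>

static uint64_t
insv_example (uint64_t dest, int bitsize, int bitpos, uint64_t src)
{
  /* Insert the low BITSIZE bits of SRC into DEST at MSB-relative
     position BITPOS.  */
  int lsb = 64 - bitpos - bitsize;
  uint64_t field = (bitsize == 64 ? ~(uint64_t) 0
                    : (((uint64_t) 1 << bitsize) - 1));
  uint64_t mask = field << lsb;
  return (dest & ~mask) | ((src << lsb) & mask);
}
#endif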
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
   register that holds VAL of mode MODE shifted by COUNT bits.  */

static rtx
s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
{
  val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
                             NULL_RTX, 1, OPTAB_DIRECT);
  return expand_simple_binop (SImode, ASHIFT, val, count,
                              NULL_RTX, 1, OPTAB_DIRECT);
}
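
/* For instance (editorial note): with MODE == QImode and COUNT == 24 the
   result register holds (val & 0xff) << 24, i.e. the byte moved into the
   big-endian position it occupies within its containing SImode word.  */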
/* Structure to hold the initial parameters for a compare_and_swap operation
   in HImode and QImode.  */

struct alignment_context
{
  rtx memsi;      /* SI aligned memory location.  */
  rtx shift;      /* Bit offset with regard to lsb.  */
  rtx modemask;   /* Mask of the HQImode shifted by SHIFT bits.  */
  rtx modemaski;  /* ~modemask */
  bool aligned;   /* True if memory is aligned, false else.  */
};
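
/* Worked example (editorial note): for a HImode value at the start of a
   32-bit aligned word, shift is (4 - 2) * 8 = 16 bits, modemask is
   0xffff << 16 = 0xffff0000 and modemaski is 0x0000ffff; the HImode field
   occupies the two high-order bytes on this big-endian target.  */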
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
   structure AC for transparent simplifying, if the memory alignment is known
   to be at least 32bit.  MEM is the memory location for the actual operation
   and MODE its mode.  */

static void
init_alignment_context (struct alignment_context *ac, rtx mem,
                        machine_mode mode)
{
  ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
  ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));

  if (ac->aligned)
    ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
  else
    {
      /* Alignment is unknown. */
      rtx byteoffset, addr, align;

      /* Force the address into a register. */
      addr = force_reg (Pmode, XEXP (mem, 0));

      /* Align it to SImode. */
      align = expand_simple_binop (Pmode, AND, addr,
                                   GEN_INT (-GET_MODE_SIZE (SImode)),
                                   NULL_RTX, 1, OPTAB_DIRECT);
      /* Generate MEM. */
      ac->memsi = gen_rtx_MEM (SImode, align);
      MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
      set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
      set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));

      /* Calculate shiftcount. */
      byteoffset = expand_simple_binop (Pmode, AND, addr,
                                        GEN_INT (GET_MODE_SIZE (SImode) - 1),
                                        NULL_RTX, 1, OPTAB_DIRECT);
      /* As we already have some offset, evaluate the remaining distance. */
      ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
                                       NULL_RTX, 1, OPTAB_DIRECT);
    }

  /* Shift is the byte count, but we need the bitcount. */
  ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
                                   NULL_RTX, 1, OPTAB_DIRECT);

  /* Calculate masks. */
  ac->modemask = expand_simple_binop (SImode, ASHIFT,
                                      GEN_INT (GET_MODE_MASK (mode)),
                                      ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
  ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
                                      NULL_RTX, 1);
}
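
/* Illustrative sketch (editorial addition, never compiled): the same
   shift/mask computation in plain C for an unaligned HImode access at
   address ADDR, assuming the field does not cross its containing word.
   The helper name is hypothetical.  */
#if 0 /* example only */
#include <stdint.h>

static void
alignment_context_example (uintptr_t addr, uint32_t *shift_bits,
                           uint32_t *modemask)
{
  uint32_t byteoffset = (uint32_t) (addr & 3);   /* offset within the word */
  uint32_t shift_bytes = (4 - 2) - byteoffset;   /* remaining distance */
  *shift_bits = shift_bytes << 3;                /* byte count -> bit count */
  *modemask = 0xffffu << *shift_bits;            /* HImode field mask */
}
#endif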
/* A subroutine of s390_expand_cs_hqi.  Insert INS into VAL.  If possible,
   use a single insv insn into SEQ2.  Otherwise, put prep insns in SEQ1 and
   perform the merge in SEQ2.  */

static rtx
s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
                    machine_mode mode, rtx val, rtx ins)
{
  rtx tmp;

  /* Prepare insert via insv.  */
  start_sequence ();
  tmp = copy_to_mode_reg (SImode, val);
  if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
                        ac->shift, ins))
    {
      *seq1 = NULL;
      *seq2 = get_insns ();
      end_sequence ();
      return tmp;
    }
  end_sequence ();

  /* Failed to use insv.  Generate a two part shift and mask.  */
  start_sequence ();
  tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
  *seq1 = get_insns ();
  end_sequence ();

  start_sequence ();
  tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
  *seq2 = get_insns ();
  end_sequence ();

  return tmp;
}
/* Expand an atomic compare and swap operation for HImode and QImode.  MEM is
   the memory location, CMP the old value to compare MEM with and NEW_RTX the
   value to set if CMP == MEM.  */

void
s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
                    rtx cmp, rtx new_rtx, bool is_weak)
{
  struct alignment_context ac;
  rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
  rtx res = gen_reg_rtx (SImode);
  rtx_code_label *csloop = NULL, *csend = NULL;

  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Load full word.  Subsequent loads are performed by CS.  */
  val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
                             NULL_RTX, 1, OPTAB_DIRECT);

  /* Prepare insertions of cmp and new_rtx into the loaded value.  When
     possible, we try to use insv to make this happen efficiently.  If
     that fails we'll generate code both inside and outside the loop.  */
  cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
  newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);

  if (seq0)
    emit_insn (seq0);
  if (seq1)
    emit_insn (seq1);

  /* Start CS loop.  */
  if (!is_weak)
    {
      /* Begin assuming success.  */
      emit_move_insn (btarget, const1_rtx);

      csloop = gen_label_rtx ();
      csend = gen_label_rtx ();
      emit_label (csloop);
    }

  /* val = "<mem>00..0<mem>"
   * cmp = "00..0<cmp>00..0"
   * new = "00..0<new>00..0"
   */

  emit_insn (seq2);
  emit_insn (seq3);

  cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
  if (is_weak)
    emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
  else
    {
      rtx tmp;

      /* Jump to end if we're done (likely?).  */
      s390_emit_jump (csend, cc);

      /* Check for changes outside mode, and loop internal if so.
         Arrange the moves so that the compare is adjacent to the
         branch so that we can generate CRJ.  */
      tmp = copy_to_reg (val);
      force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
                          1, OPTAB_DIRECT);
      cc = s390_emit_compare (NE, val, tmp);
      s390_emit_jump (csloop, cc);

      /* Failed.  */
      emit_move_insn (btarget, const0_rtx);
      emit_label (csend);
    }

  /* Return the correct part of the bitfield.  */
  convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
                                              NULL_RTX, 1, OPTAB_DIRECT), 1);
}
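
/* Illustrative sketch (editorial addition, never compiled): the algorithm
   above written as plain C, using the GCC __atomic_compare_exchange_n
   builtin in place of the CS instruction.  All names are hypothetical;
   the real expander emits RTL, not calls.  */
#if 0 /* example only */
#include <stdint.h>

static int
cs_hqi_example (uint32_t *word, int shift, uint32_t modemask,
                uint16_t cmp, uint16_t new_val, uint16_t *old)
{
  /* Load the containing word and clear the HImode field, as the
     expander does with AND modemaski.  */
  uint32_t val = *word & ~modemask;
  for (;;)
    {
      /* Insert cmp resp. new into the loaded background value.  */
      uint32_t cmpv = val | ((uint32_t) cmp << shift);
      uint32_t newv = val | ((uint32_t) new_val << shift);
      uint32_t res = cmpv;
      if (__atomic_compare_exchange_n (word, &res, newv, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        {
          *old = (uint16_t) (res >> shift);
          return 1;                     /* success */
        }
      /* CS failed: if only bits outside the field changed, retry with
         the new background; otherwise report a genuine mismatch.  */
      if ((res & ~modemask) != val)
        {
          val = res & ~modemask;
          continue;
        }
      *old = (uint16_t) (res >> shift);
      return 0;
    }
}
#endif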
/* Expand an atomic operation CODE of mode MODE.  MEM is the memory location
   and VAL the value to play with.  If AFTER is true then store the value MEM
   holds after the operation, if AFTER is false then store the value MEM
   holds before the operation.  If TARGET is zero then discard that value, else
   store it to TARGET.  */

void
s390_expand_atomic (machine_mode mode, enum rtx_code code,
                    rtx target, rtx mem, rtx val, bool after)
{
  struct alignment_context ac;
  rtx cmp;
  rtx new_rtx = gen_reg_rtx (SImode);
  rtx orig = gen_reg_rtx (SImode);
  rtx_code_label *csloop = gen_label_rtx ();

  gcc_assert (!target || register_operand (target, VOIDmode));
  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Shift val to the correct bit positions.
     Preserve "icm", but prevent "ex icm".  */
  if (!(ac.aligned && code == SET && MEM_P (val)))
    val = s390_expand_mask_and_shift (val, mode, ac.shift);

  /* Further preparation insns.  */
  if (code == PLUS || code == MINUS)
    emit_move_insn (orig, val);
  else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
    val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
                               NULL_RTX, 1, OPTAB_DIRECT);

  /* Load full word.  Subsequent loads are performed by CS.  */
  cmp = force_reg (SImode, ac.memsi);

  /* Start CS loop.  */
  emit_label (csloop);
  emit_move_insn (new_rtx, cmp);

  /* Patch new with val at correct position.  */
  switch (code)
    {
    case PLUS:
    case MINUS:
      val = expand_simple_binop (SImode, code, new_rtx, orig,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      val = expand_simple_binop (SImode, AND, val, ac.modemask,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      /* FALLTHRU */
    case SET:
      if (ac.aligned && MEM_P (val))
        store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
                         0, 0, SImode, val);
      else
        {
          new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
                                         NULL_RTX, 1, OPTAB_DIRECT);
          new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
                                         NULL_RTX, 1, OPTAB_DIRECT);
        }
      break;
    case AND:
    case IOR:
    case XOR:
      new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    case MULT: /* NAND */
      new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    default:
      gcc_unreachable ();
    }

  s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
                                                      ac.memsi, cmp, new_rtx));

  /* Return the correct part of the bitfield.  */
  if (target)
    convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
                                               after ? new_rtx : cmp, ac.shift,
                                               NULL_RTX, 1, OPTAB_DIRECT), 1);
}
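
/* Illustrative sketch (editorial addition, never compiled): a sub-word
   fetch-and-AND written as plain C, mirroring the CS loop above.  Note
   the pre-fill of the bits outside the field with ones
   (val = "11..1<val>11..1") so the word-wide AND leaves them untouched;
   MULT would encode NAND.  The helper name is hypothetical.  */
#if 0 /* example only */
#include <stdint.h>

static uint16_t
atomic_fetch_and_hi_example (uint32_t *word, int shift, uint32_t modemask,
                             uint16_t operand, int after)
{
  uint32_t val = ((uint32_t) operand << shift) | ~modemask;
  uint32_t cmp = *word, new_val;
  do
    {
      new_val = cmp & val;      /* the word-wide op of the CS loop */
    }
  while (!__atomic_compare_exchange_n (word, &cmp, new_val, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
  return (uint16_t) ((after ? new_val : cmp) >> shift);
}
#endif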
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;

static void
s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs ("\t.quad\t", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@DTPOFF", file);
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
s390_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
s390_delegitimize_address (rtx orig_x)
{
  rtx x, y;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;

  /* Extract the symbol ref from:
     (plus:SI (reg:SI 12 %r12)
              (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
                                   UNSPEC_GOTOFF/PLTOFF)))
     and
     (plus:SI (reg:SI 12 %r12)
              (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
                                            UNSPEC_GOTOFF/PLTOFF)
                                 (const_int 4 [0x4]))))  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
      && GET_CODE (XEXP (x, 1)) == CONST)
    {
      HOST_WIDE_INT offset = 0;

      /* The const operand.  */
      y = XEXP (XEXP (x, 1), 0);

      if (GET_CODE (y) == PLUS
          && GET_CODE (XEXP (y, 1)) == CONST_INT)
        {
          offset = INTVAL (XEXP (y, 1));
          y = XEXP (y, 0);
        }

      if (GET_CODE (y) == UNSPEC
          && (XINT (y, 1) == UNSPEC_GOTOFF
              || XINT (y, 1) == UNSPEC_PLTOFF))
        return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
    }

  if (GET_CODE (x) != MEM)
    return orig_x;

  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
    {
      y = XEXP (XEXP (x, 1), 0);
      if (GET_CODE (y) == UNSPEC
          && XINT (y, 1) == UNSPEC_GOT)
        y = XVECEXP (y, 0, 0);
      else
        return orig_x;
    }
  else if (GET_CODE (x) == CONST)
    {
      /* Extract the symbol ref from:
         (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
                                      UNSPEC_PLT/GOTENT)))  */
      y = XEXP (x, 0);
      if (GET_CODE (y) == UNSPEC
          && (XINT (y, 1) == UNSPEC_GOTENT
              || XINT (y, 1) == UNSPEC_PLT))
        y = XVECEXP (y, 0, 0);
      else
        return orig_x;
    }
  else
    return orig_x;

  if (GET_MODE (orig_x) != Pmode)
    {
      if (GET_MODE (orig_x) == BLKmode)
        return orig_x;
      y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
      if (y == NULL_RTX)
        return orig_x;
    }
  return y;
}
/* Output operand OP to stdio stream FILE.
   OP is an address (register + offset) which is not used to address data;
   instead the rightmost bits are interpreted as the value.  */

static void
print_shift_count_operand (FILE *file, rtx op)
{
  HOST_WIDE_INT offset;
  rtx base;

  /* Extract base register and offset.  */
  if (!s390_decompose_shift_count (op, &base, &offset))
    gcc_unreachable ();

  /* Sanity check.  */
  if (base)
    {
      gcc_assert (GET_CODE (base) == REG);
      gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
      gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
    }

  /* Offsets are constricted to twelve bits.  */
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
  if (base)
    fprintf (file, "(%s)", reg_names[REGNO (base)]);
}
/* Returns false if the function should not be made hotpatchable.
   Otherwise it assigns the number of NOP halfwords to be emitted
   before and after the function label to hw_before and hw_after.
   Both must not be NULL.  */

static bool
s390_function_num_hotpatch_hw (tree decl,
                               int *hw_before,
                               int *hw_after)
{
  tree attr;

  attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));

  /* Handle the arguments of the hotpatch attribute.  The values
     specified via attribute might override the cmdline argument
     values.  */
  if (attr)
    {
      tree args = TREE_VALUE (attr);

      *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
      *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
    }
  else
    {
      /* Use the values specified by the cmdline arguments.  */
      *hw_before = s390_hotpatch_hw_before_label;
      *hw_after = s390_hotpatch_hw_after_label;
    }

  if (*hw_before == 0 && *hw_after == 0)
    return false;

  if (decl_function_context (decl) != NULL_TREE)
    {
      warning_at (DECL_SOURCE_LOCATION (decl), OPT_mhotpatch_,
                  "hotpatching is not compatible with nested functions");
      return false;
    }

  return true;
}
/* Write the extra assembler code needed to declare a function properly.  */

void
s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
                                tree decl)
{
  int hw_before, hw_after;
  bool hotpatch_p = (decl
                     ? s390_function_num_hotpatch_hw (decl,
                                                      &hw_before, &hw_after)
                     : false);

  if (hotpatch_p)
    {
      int i;

      /* Add a trampoline code area before the function label and initialize it
         with two-byte nop instructions.  This area can be overwritten with code
         that jumps to a patched version of the function.  */
      for (i = 0; i < hw_before; i++)
        asm_fprintf (asm_out_file, "\tnopr\t%%r7\n");
      /* Note: The function label must be aligned so that (a) the bytes of the
         following nop do not cross a cacheline boundary, and (b) a jump address
         (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
         stored directly before the label without crossing a cacheline
         boundary.  All this is necessary to make sure the trampoline code can
         be changed atomically.  */
    }

  ASM_OUTPUT_LABEL (asm_out_file, fname);

  /* Output a series of NOPs after the function label.  */
  if (hotpatch_p)
    {
      while (hw_after > 0)
        {
          if (hw_after >= 3 && TARGET_CPU_ZARCH)
            {
              asm_fprintf (asm_out_file, "\tbrcl\t\t0,0\n");
              hw_after -= 3;
            }
          else if (hw_after >= 2)
            {
              gcc_assert (hw_after == 2 || !TARGET_CPU_ZARCH);
              asm_fprintf (asm_out_file, "\tnop\t0\n");
              hw_after -= 2;
            }
          else
            {
              gcc_assert (hw_after == 1);
              asm_fprintf (asm_out_file, "\tnopr\t%%r7\n");
              hw_after = 0;
            }
        }
    }
}
/* Output machine-dependent UNSPECs occurring in address constant X
   in assembler syntax to stdio stream FILE.  Returns true if the
   constant X could be recognized, false otherwise.  */

static bool
s390_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
    switch (XINT (x, 1))
      {
      case UNSPEC_GOTENT:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOTENT");
        return true;
      case UNSPEC_GOT:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOT");
        return true;
      case UNSPEC_GOTOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOTOFF");
        return true;
      case UNSPEC_PLT:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@PLT");
        return true;
      case UNSPEC_PLTOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@PLTOFF");
        return true;
      case UNSPEC_TLSGD:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@TLSGD");
        return true;
      case UNSPEC_TLSLDM:
        assemble_name (file, get_some_local_dynamic_name ());
        fprintf (file, "@TLSLDM");
        return true;
      case UNSPEC_DTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@DTPOFF");
        return true;
      case UNSPEC_NTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@NTPOFF");
        return true;
      case UNSPEC_GOTNTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@GOTNTPOFF");
        return true;
      case UNSPEC_INDNTPOFF:
        output_addr_const (file, XVECEXP (x, 0, 0));
        fprintf (file, "@INDNTPOFF");
        return true;
      }

  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
    switch (XINT (x, 1))
      {
      case UNSPEC_POOL_OFFSET:
        x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
        output_addr_const (file, x);
        return true;
      }

  return false;
}
/* Output address operand ADDR in assembler syntax to
   stdio stream FILE.  */

static void
print_operand_address (FILE *file, rtx addr)
{
  struct s390_address ad;

  if (s390_loadrelative_operand_p (addr, NULL, NULL))
    {
      if (!TARGET_Z10)
        {
          output_operand_lossage ("symbolic memory references are "
                                  "only supported on z10 or later");
          return;
        }
      output_addr_const (file, addr);
      return;
    }

  if (!s390_decompose_address (addr, &ad)
      || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
      || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
    output_operand_lossage ("cannot decompose address");

  if (ad.disp)
    output_addr_const (file, ad.disp);
  else
    fprintf (file, "0");

  if (ad.base && ad.indx)
    fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
             reg_names[REGNO (ad.base)]);
  else if (ad.base)
    fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
}
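
/* For example (editorial note): a displacement of 8 with index %r3 and
   base %r2 is printed in D(X,B) syntax as "8(%r3,%r2)"; without an
   index register it becomes "8(%r2)".  */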
/* Output operand X in assembler syntax to stdio stream FILE.
   CODE specifies the format flag.  The following format flags
   are recognized:

   'C': print opcode suffix for branch condition.
   'D': print opcode suffix for inverse branch condition.
   'E': print opcode suffix for branch on index instruction.
   'G': print the size of the operand in bytes.
   'J': print tls_load/tls_gdcall/tls_ldcall suffix
   'M': print the second word of a TImode operand.
   'N': print the second word of a DImode operand.
   'O': print only the displacement of a memory reference.
   'R': print only the base register of a memory reference.
   'S': print S-type memory reference (base+displacement).
   'Y': print shift count operand.

   'b': print integer X as if it's an unsigned byte.
   'c': print integer X as if it's a signed byte.
   'e': "end" of DImode contiguous bitmask X.
   'f': "end" of SImode contiguous bitmask X.
   'h': print integer X as if it's a signed halfword.
   'i': print the first nonzero HImode part of X.
   'j': print the first HImode part unequal to -1 of X.
   'k': print the first nonzero SImode part of X.
   'm': print the first SImode part unequal to -1 of X.
   'o': print integer X as if it's an unsigned 32bit word.
   's': "start" of DImode contiguous bitmask X.
   't': "start" of SImode contiguous bitmask X.
   'x': print integer X as if it's an unsigned halfword.  */

static void
print_operand (FILE *file, rtx x, int code)
{
  HOST_WIDE_INT ival;

  switch (code)
    {
    case 'C':
      fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
      return;

    case 'D':
      fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
      return;

    case 'E':
      if (GET_CODE (x) == LE)
        fprintf (file, "l");
      else if (GET_CODE (x) == GT)
        fprintf (file, "h");
      else
        output_operand_lossage ("invalid comparison operator "
                                "for 'E' output modifier");
      return;

    case 'J':
      if (GET_CODE (x) == SYMBOL_REF)
        {
          fprintf (file, "%s", ":tls_load:");
          output_addr_const (file, x);
        }
      else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
        {
          fprintf (file, "%s", ":tls_gdcall:");
          output_addr_const (file, XVECEXP (x, 0, 0));
        }
      else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
        {
          fprintf (file, "%s", ":tls_ldcall:");
          const char *name = get_some_local_dynamic_name ();
          gcc_assert (name);
          assemble_name (file, name);
        }
      else
        output_operand_lossage ("invalid reference for 'J' output modifier");
      return;

    case 'G':
      fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
      return;

    case 'O':
      {
        struct s390_address ad;
        int ret;

        if (!MEM_P (x))
          {
            output_operand_lossage ("memory reference expected for "
                                    "'O' output modifier");
            return;
          }

        ret = s390_decompose_address (XEXP (x, 0), &ad);

        if (!ret
            || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
            || ad.indx)
          {
            output_operand_lossage ("invalid address for 'O' output modifier");
            return;
          }

        if (ad.disp)
          output_addr_const (file, ad.disp);
        else
          fprintf (file, "0");
      }
      return;

    case 'R':
      {
        struct s390_address ad;
        int ret;

        if (!MEM_P (x))
          {
            output_operand_lossage ("memory reference expected for "
                                    "'R' output modifier");
            return;
          }

        ret = s390_decompose_address (XEXP (x, 0), &ad);

        if (!ret
            || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
            || ad.indx)
          {
            output_operand_lossage ("invalid address for 'R' output modifier");
            return;
          }

        if (ad.base)
          fprintf (file, "%s", reg_names[REGNO (ad.base)]);
        else
          fprintf (file, "0");
      }
      return;

    case 'S':
      {
        struct s390_address ad;
        int ret;

        if (!MEM_P (x))
          {
            output_operand_lossage ("memory reference expected for "
                                    "'S' output modifier");
            return;
          }
        ret = s390_decompose_address (XEXP (x, 0), &ad);

        if (!ret
            || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
            || ad.indx)
          {
            output_operand_lossage ("invalid address for 'S' output modifier");
            return;
          }

        if (ad.disp)
          output_addr_const (file, ad.disp);
        else
          fprintf (file, "0");

        if (ad.base)
          fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
      }
      return;

    case 'N':
      if (GET_CODE (x) == REG)
        x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
      else if (GET_CODE (x) == MEM)
        x = change_address (x, VOIDmode,
                            plus_constant (Pmode, XEXP (x, 0), 4));
      else
        output_operand_lossage ("register or memory expression expected "
                                "for 'N' output modifier");
      break;

    case 'M':
      if (GET_CODE (x) == REG)
        x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
      else if (GET_CODE (x) == MEM)
        x = change_address (x, VOIDmode,
                            plus_constant (Pmode, XEXP (x, 0), 8));
      else
        output_operand_lossage ("register or memory expression expected "
                                "for 'M' output modifier");
      break;

    case 'Y':
      print_shift_count_operand (file, x);
      return;
    }

  switch (GET_CODE (x))
    {
    case REG:
      fprintf (file, "%s", reg_names[REGNO (x)]);
      break;

    case MEM:
      output_address (XEXP (x, 0));
      break;

    case CONST:
    case CODE_LABEL:
    case LABEL_REF:
    case SYMBOL_REF:
      output_addr_const (file, x);
      break;

    case CONST_INT:
      ival = INTVAL (x);
      switch (code)
        {
        case 0:
          break;
        case 'b':
          ival &= 0xff;
          break;
        case 'c':
          ival = ((ival & 0xff) ^ 0x80) - 0x80;
          break;
        case 'x':
          ival &= 0xffff;
          break;
        case 'h':
          ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
          break;
        case 'i':
          ival = s390_extract_part (x, HImode, 0);
          break;
        case 'j':
          ival = s390_extract_part (x, HImode, -1);
          break;
        case 'k':
          ival = s390_extract_part (x, SImode, 0);
          break;
        case 'm':
          ival = s390_extract_part (x, SImode, -1);
          break;
        case 'o':
          ival &= 0xffffffff;
          break;
        case 'e': case 'f':
        case 's': case 't':
          {
            int pos, len;
            bool ok;

            len = (code == 's' || code == 'e' ? 64 : 32);
            ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
            gcc_assert (ok);
            if (code == 's' || code == 't')
              ival = 64 - pos - len;
            else
              ival = 64 - 1 - pos;
          }
          break;
        default:
          output_operand_lossage ("invalid constant for output modifier '%c'", code);
        }
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
      break;

    case CONST_DOUBLE:
      gcc_assert (GET_MODE (x) == VOIDmode);
      if (code == 'b')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
      else if (code == 'x')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
      else if (code == 'h')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC,
                 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
        {
          if (code == 0)
            output_operand_lossage ("invalid constant - try using "
                                    "an output modifier");
          else
            output_operand_lossage ("invalid constant for output modifier '%c'",
                                    code);
        }
      break;

    default:
      if (code == 0)
        output_operand_lossage ("invalid expression - try using "
                                "an output modifier");
      else
        output_operand_lossage ("invalid expression for output "
                                "modifier '%c'", code);
      break;
    }
}
/* Target hook for assembling integer objects.  We need to define it
   here to work around a bug in some versions of GAS, which couldn't
   handle values smaller than INT_MIN when printed in decimal.  */

static bool
s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == 8 && aligned_p
      && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
    {
      fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
               INTVAL (x));
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
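
/* For instance (editorial note): the value -0x80000001, one below
   INT_MIN, is emitted as "\t.quad\t0xffffffff7fffffff" rather than in
   its decimal spelling that older GAS versions mishandled.  */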
/* Returns true if register REGNO is used for forming
   a memory address in expression X.  */

static bool
reg_used_in_mem_p (int regno, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  if (code == MEM)
    {
      if (refers_to_regno_p (regno, XEXP (x, 0)))
        return true;
    }
  else if (code == SET
           && GET_CODE (SET_DEST (x)) == PC)
    {
      if (refers_to_regno_p (regno, SET_SRC (x)))
        return true;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && reg_used_in_mem_p (regno, XEXP (x, i)))
        return true;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
            return true;
    }
  return false;
}
/* Returns true if expression DEP_RTX sets an address register
   used by instruction INSN to address memory.  */

static bool
addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
{
  rtx target, pat;

  if (NONJUMP_INSN_P (dep_rtx))
    dep_rtx = PATTERN (dep_rtx);

  if (GET_CODE (dep_rtx) == SET)
    {
      target = SET_DEST (dep_rtx);
      if (GET_CODE (target) == STRICT_LOW_PART)
        target = XEXP (target, 0);
      while (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (GET_CODE (target) == REG)
        {
          int regno = REGNO (target);

          if (s390_safe_attr_type (insn) == TYPE_LA)
            {
              pat = PATTERN (insn);
              if (GET_CODE (pat) == PARALLEL)
                {
                  gcc_assert (XVECLEN (pat, 0) == 2);
                  pat = XVECEXP (pat, 0, 0);
                }
              gcc_assert (GET_CODE (pat) == SET);
              return refers_to_regno_p (regno, SET_SRC (pat));
            }
          else if (get_attr_atype (insn) == ATYPE_AGEN)
            return reg_used_in_mem_p (regno, PATTERN (insn));
        }
    }
  return false;
}
/* Return 1, if dep_insn sets register used in insn in the agen unit.  */

int
s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
{
  rtx dep_rtx = PATTERN (dep_insn);
  int i;

  if (GET_CODE (dep_rtx) == SET
      && addr_generation_dependency_p (dep_rtx, insn))
    return 1;
  else if (GET_CODE (dep_rtx) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
        {
          if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
            return 1;
        }
    }
  return 0;
}
/* A C statement (sans semicolon) to update the integer scheduling priority
   INSN_PRIORITY (INSN).  Increase the priority to execute the INSN earlier,
   reduce the priority to execute INSN later.  Do not define this macro if
   you do not need to adjust the scheduling priorities of insns.

   A STD instruction should be scheduled earlier,
   in order to use the bypass.  */
static int
s390_adjust_priority (rtx_insn *insn, int priority)
{
  if (! INSN_P (insn))
    return priority;

  if (s390_tune != PROCESSOR_2084_Z990
      && s390_tune != PROCESSOR_2094_Z9_109
      && s390_tune != PROCESSOR_2097_Z10
      && s390_tune != PROCESSOR_2817_Z196
      && s390_tune != PROCESSOR_2827_ZEC12)
    return priority;

  switch (s390_safe_attr_type (insn))
    {
      case TYPE_FSTOREDF:
      case TYPE_FSTORESF:
        priority = priority << 3;
        break;
      case TYPE_STORE:
      case TYPE_STM:
        priority = priority << 1;
        break;
      default:
        break;
    }
  return priority;
}
/* The number of instructions that can be issued per cycle.  */

static int
s390_issue_rate (void)
{
  switch (s390_tune)
    {
    case PROCESSOR_2084_Z990:
    case PROCESSOR_2094_Z9_109:
    case PROCESSOR_2817_Z196:
      return 3;
    case PROCESSOR_2097_Z10:
    case PROCESSOR_2827_ZEC12:
      return 2;
    default:
      return 1;
    }
}

static int
s390_first_cycle_multipass_dfa_lookahead (void)
{
  return 4;
}
/* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
   Fix up MEMs as required.  */

static void
annotate_constant_pool_refs (rtx *x)
{
  int i, j;
  const char *fmt;

  gcc_assert (GET_CODE (*x) != SYMBOL_REF
              || !CONSTANT_POOL_ADDRESS_P (*x));

  /* Literal pool references can only occur inside a MEM ...  */
  if (GET_CODE (*x) == MEM)
    {
      rtx memref = XEXP (*x, 0);

      if (GET_CODE (memref) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (memref))
        {
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
                                     UNSPEC_LTREF);

          *x = replace_equiv_address (*x, addr);
          return;
        }

      if (GET_CODE (memref) == CONST
          && GET_CODE (XEXP (memref, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
        {
          HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
          rtx sym = XEXP (XEXP (memref, 0), 0);
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
                                     UNSPEC_LTREF);

          *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
          return;
        }
    }

  /* ... or a load-address type pattern.  */
  if (GET_CODE (*x) == SET)
    {
      rtx addrref = SET_SRC (*x);

      if (GET_CODE (addrref) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (addrref))
        {
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
                                     UNSPEC_LTREF);

          SET_SRC (*x) = addr;
          return;
        }

      if (GET_CODE (addrref) == CONST
          && GET_CODE (XEXP (addrref, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
        {
          HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
          rtx sym = XEXP (XEXP (addrref, 0), 0);
          rtx base = cfun->machine->base_reg;
          rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
                                     UNSPEC_LTREF);

          SET_SRC (*x) = plus_constant (Pmode, addr, off);
          return;
        }
    }

  /* Annotate LTREL_BASE as well.  */
  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_LTREL_BASE)
    {
      rtx base = cfun->machine->base_reg;
      *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
                           UNSPEC_LTREL_BASE);
      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (*x));
  for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          annotate_constant_pool_refs (&XEXP (*x, i));
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (*x, i); j++)
            annotate_constant_pool_refs (&XVECEXP (*x, i, j));
        }
    }
}
/* Split all branches that exceed the maximum distance.
   Returns true if this created a new literal pool entry.  */

static int
s390_split_branches (void)
{
  rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
  int new_literal = 0, ret;
  rtx_insn *insn;
  rtx pat, target;
  rtx *label;

  /* We need correct insn addresses.  */

  shorten_branches (get_insns ());

  /* Find all branches that exceed 64KB, and split them.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
        continue;

      pat = PATTERN (insn);
      if (GET_CODE (pat) == PARALLEL)
        pat = XVECEXP (pat, 0, 0);
      if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
        continue;

      if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
        {
          label = &SET_SRC (pat);
        }
      else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
        {
          if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
            label = &XEXP (SET_SRC (pat), 1);
          else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
            label = &XEXP (SET_SRC (pat), 2);
          else
            continue;
        }
      else
        continue;

      if (get_attr_length (insn) <= 4)
        continue;

      /* We are going to use the return register as scratch register,
         make sure it will be saved/restored by the prologue/epilogue.  */
      cfun_frame_layout.save_return_addr_p = 1;

      if (!flag_pic)
        {
          new_literal = 1;
          rtx mem = force_const_mem (Pmode, *label);
          rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (Pmode, temp_reg,
                                                              mem), insn);
          INSN_ADDRESSES_NEW (set_insn, -1);
          annotate_constant_pool_refs (&PATTERN (set_insn));

          target = temp_reg;
        }
      else
        {
          new_literal = 1;
          target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
                                   UNSPEC_LTREL_OFFSET);
          target = gen_rtx_CONST (Pmode, target);
          target = force_const_mem (Pmode, target);
          rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (Pmode, temp_reg,
                                                              target), insn);
          INSN_ADDRESSES_NEW (set_insn, -1);
          annotate_constant_pool_refs (&PATTERN (set_insn));

          target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
                                                     cfun->machine->base_reg),
                                   UNSPEC_LTREL_BASE);
          target = gen_rtx_PLUS (Pmode, temp_reg, target);
        }

      ret = validate_change (insn, label, target, 0);
      gcc_assert (ret);
    }

  return new_literal;
}
/* Find an annotated literal pool symbol referenced in RTX X,
   and store it at REF.  Will abort if X contains references to
   more than one such pool symbol; multiple references to the same
   symbol are allowed, however.

   The rtx pointed to by REF must be initialized to NULL_RTX
   by the caller before calling this routine.  */

static void
find_constant_pool_ref (rtx x, rtx *ref)
{
  int i, j;
  const char *fmt;

  /* Ignore LTREL_BASE references.  */
  if (GET_CODE (x) == UNSPEC
      && XINT (x, 1) == UNSPEC_LTREL_BASE)
    return;
  /* Likewise POOL_ENTRY insns.  */
  if (GET_CODE (x) == UNSPEC_VOLATILE
      && XINT (x, 1) == UNSPECV_POOL_ENTRY)
    return;

  gcc_assert (GET_CODE (x) != SYMBOL_REF
              || !CONSTANT_POOL_ADDRESS_P (x));

  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
    {
      rtx sym = XVECEXP (x, 0, 0);
      gcc_assert (GET_CODE (sym) == SYMBOL_REF
                  && CONSTANT_POOL_ADDRESS_P (sym));

      if (*ref == NULL_RTX)
        *ref = sym;
      else
        gcc_assert (*ref == sym);

      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          find_constant_pool_ref (XEXP (x, i), ref);
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (x, i); j++)
            find_constant_pool_ref (XVECEXP (x, i, j), ref);
        }
    }
}
/* Replace every reference to the annotated literal pool
   symbol REF in X by its base plus OFFSET.  */

static void
replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
{
  int i, j;
  const char *fmt;

  gcc_assert (*x != ref);

  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_LTREF
      && XVECEXP (*x, 0, 0) == ref)
    {
      *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
      return;
    }

  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 0)) == UNSPEC
      && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
      && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
    {
      rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
      *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (*x));
  for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (*x, i); j++)
            replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
        }
    }
}
/* Check whether X contains an UNSPEC_LTREL_BASE.
   Return its constant pool symbol if found, NULL_RTX otherwise.  */

static rtx
find_ltrel_base (rtx x)
{
  int i, j;
  const char *fmt;

  if (GET_CODE (x) == UNSPEC
      && XINT (x, 1) == UNSPEC_LTREL_BASE)
    return XVECEXP (x, 0, 0);

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          rtx fnd = find_ltrel_base (XEXP (x, i));
          if (fnd)
            return fnd;
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (x, i); j++)
            {
              rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
              if (fnd)
                return fnd;
            }
        }
    }

  return NULL_RTX;
}
/* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base.  */

static void
replace_ltrel_base (rtx *x)
{
  int i, j;
  const char *fmt;

  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_LTREL_BASE)
    {
      *x = XVECEXP (*x, 0, 1);
      return;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (*x));
  for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          replace_ltrel_base (&XEXP (*x, i));
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (*x, i); j++)
            replace_ltrel_base (&XVECEXP (*x, i, j));
        }
    }
}
/* We keep a list of constants which we have to add to internal
   constant tables in the middle of large functions.  */

#define NR_C_MODES 11
machine_mode constant_modes[NR_C_MODES] =
{
  TFmode, TImode, TDmode,
  DFmode, DImode, DDmode,
  SFmode, SImode, SDmode,
  HImode,
  QImode
};

struct constant
{
  struct constant *next;
  rtx value;
  rtx_code_label *label;
};

struct constant_pool
{
  struct constant_pool *next;
  rtx_insn *first_insn;
  rtx_insn *pool_insn;
  bitmap insns;
  rtx_insn *emit_pool_after;

  struct constant *constants[NR_C_MODES];
  struct constant *execute;
  rtx_code_label *label;
  int size;
};
/* Allocate new constant_pool structure.  */

static struct constant_pool *
s390_alloc_pool (void)
{
  struct constant_pool *pool;
  int i;

  pool = (struct constant_pool *) xmalloc (sizeof *pool);
  pool->next = NULL;
  for (i = 0; i < NR_C_MODES; i++)
    pool->constants[i] = NULL;

  pool->execute = NULL;
  pool->label = gen_label_rtx ();
  pool->first_insn = NULL;
  pool->pool_insn = NULL;
  pool->insns = BITMAP_ALLOC (NULL);
  pool->size = 0;
  pool->emit_pool_after = NULL;

  return pool;
}
/* Create new constant pool covering instructions starting at INSN
   and chain it to the end of POOL_LIST.  */

static struct constant_pool *
s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
{
  struct constant_pool *pool, **prev;

  pool = s390_alloc_pool ();
  pool->first_insn = insn;

  for (prev = pool_list; *prev; prev = &(*prev)->next)
    ;
  *prev = pool;

  return pool;
}
/* End range of instructions covered by POOL at INSN and emit
   placeholder insn representing the pool.  */

static void
s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
{
  rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);

  if (!insn)
    insn = get_last_insn ();

  pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
  INSN_ADDRESSES_NEW (pool->pool_insn, -1);
}
/* Add INSN to the list of insns covered by POOL.  */

static void
s390_add_pool_insn (struct constant_pool *pool, rtx insn)
{
  bitmap_set_bit (pool->insns, INSN_UID (insn));
}

/* Return pool out of POOL_LIST that covers INSN.  */

static struct constant_pool *
s390_find_pool (struct constant_pool *pool_list, rtx insn)
{
  struct constant_pool *pool;

  for (pool = pool_list; pool; pool = pool->next)
    if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
      break;

  return pool;
}
/* Add constant VAL of mode MODE to the constant pool POOL.  */

static void
s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
{
  struct constant *c;
  int i;

  for (i = 0; i < NR_C_MODES; i++)
    if (constant_modes[i] == mode)
      break;
  gcc_assert (i != NR_C_MODES);

  for (c = pool->constants[i]; c != NULL; c = c->next)
    if (rtx_equal_p (val, c->value))
      break;

  if (c == NULL)
    {
      c = (struct constant *) xmalloc (sizeof *c);
      c->value = val;
      c->label = gen_label_rtx ();
      c->next = pool->constants[i];
      pool->constants[i] = c;
      pool->size += GET_MODE_SIZE (mode);
    }
}
/* Return an rtx that represents the offset of X from the start of
   pool POOL.  */

static rtx
s390_pool_offset (struct constant_pool *pool, rtx x)
{
  rtx label;

  label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
  x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
                      UNSPEC_POOL_OFFSET);
  return gen_rtx_CONST (GET_MODE (x), x);
}
/* Find constant VAL of mode MODE in the constant pool POOL.
   Return an RTX describing the distance from the start of
   the pool to the location of the new constant.  */

static rtx
s390_find_constant (struct constant_pool *pool, rtx val,
                    machine_mode mode)
{
  struct constant *c;
  int i;

  for (i = 0; i < NR_C_MODES; i++)
    if (constant_modes[i] == mode)
      break;
  gcc_assert (i != NR_C_MODES);

  for (c = pool->constants[i]; c != NULL; c = c->next)
    if (rtx_equal_p (val, c->value))
      break;

  gcc_assert (c);

  return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
}
/* Check whether INSN is an execute.  Return the label_ref to its
   execute target template if so, NULL_RTX otherwise.  */

static rtx
s390_execute_label (rtx insn)
{
  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == PARALLEL
      && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
      && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
    return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);

  return NULL_RTX;
}
/* Add execute target for INSN to the constant pool POOL.  */

static void
s390_add_execute (struct constant_pool *pool, rtx insn)
{
  struct constant *c;

  for (c = pool->execute; c != NULL; c = c->next)
    if (INSN_UID (insn) == INSN_UID (c->value))
      break;

  if (c == NULL)
    {
      c = (struct constant *) xmalloc (sizeof *c);
      c->value = insn;
      c->label = gen_label_rtx ();
      c->next = pool->execute;
      pool->execute = c;
      pool->size += 6;
    }
}
/* Find execute target for INSN in the constant pool POOL.
   Return an RTX describing the distance from the start of
   the pool to the location of the execute target.  */

static rtx
s390_find_execute (struct constant_pool *pool, rtx insn)
{
  struct constant *c;

  for (c = pool->execute; c != NULL; c = c->next)
    if (INSN_UID (insn) == INSN_UID (c->value))
      break;

  gcc_assert (c);

  return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
}
/* For an execute INSN, extract the execute target template.  */

static rtx
s390_execute_target (rtx insn)
{
  rtx pattern = PATTERN (insn);
  gcc_assert (s390_execute_label (insn));

  if (XVECLEN (pattern, 0) == 2)
    {
      pattern = copy_rtx (XVECEXP (pattern, 0, 1));
    }
  else
    {
      rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
      int i;

      for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
        RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));

      pattern = gen_rtx_PARALLEL (VOIDmode, vec);
    }

  return pattern;
}
/* Indicate that INSN cannot be duplicated.  This is the case for
   execute insns that carry a unique label.  */

static bool
s390_cannot_copy_insn_p (rtx_insn *insn)
{
  rtx label = s390_execute_label (insn);
  return label && label != const0_rtx;
}
/* Dump out the constants in POOL.  If REMOTE_LABEL is true,
   do not emit the pool base label.  */

static void
s390_dump_pool (struct constant_pool *pool, bool remote_label)
{
  struct constant *c;
  rtx_insn *insn = pool->pool_insn;
  int i;

  /* Switch to rodata section.  */
  if (TARGET_CPU_ZARCH)
    {
      insn = emit_insn_after (gen_pool_section_start (), insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  /* Ensure minimum pool alignment.  */
  if (TARGET_CPU_ZARCH)
    insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
  else
    insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
  INSN_ADDRESSES_NEW (insn, -1);

  /* Emit pool base label.  */
  if (!remote_label)
    {
      insn = emit_label_after (pool->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  /* Dump constants in descending alignment requirement order,
     ensuring proper alignment for every constant.  */
  for (i = 0; i < NR_C_MODES; i++)
    for (c = pool->constants[i]; c; c = c->next)
      {
        /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references.  */
        rtx value = copy_rtx (c->value);
        if (GET_CODE (value) == CONST
            && GET_CODE (XEXP (value, 0)) == UNSPEC
            && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
            && XVECLEN (XEXP (value, 0), 0) == 1)
          value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));

        insn = emit_label_after (c->label, insn);
        INSN_ADDRESSES_NEW (insn, -1);

        value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
                                         gen_rtvec (1, value),
                                         UNSPECV_POOL_ENTRY);
        insn = emit_insn_after (value, insn);
        INSN_ADDRESSES_NEW (insn, -1);
      }

  /* Ensure minimum alignment for instructions.  */
  insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
  INSN_ADDRESSES_NEW (insn, -1);

  /* Output in-pool execute template insns.  */
  for (c = pool->execute; c; c = c->next)
    {
      insn = emit_label_after (c->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);

      insn = emit_insn_after (s390_execute_target (c->value), insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  /* Switch back to previous section.  */
  if (TARGET_CPU_ZARCH)
    {
      insn = emit_insn_after (gen_pool_section_end (), insn);
      INSN_ADDRESSES_NEW (insn, -1);
    }

  insn = emit_barrier_after (insn);
  INSN_ADDRESSES_NEW (insn, -1);

  /* Remove placeholder insn.  */
  remove_insn (pool->pool_insn);
}
/* Free all memory used by POOL.  */

static void
s390_free_pool (struct constant_pool *pool)
{
  struct constant *c, *next;
  int i;

  for (i = 0; i < NR_C_MODES; i++)
    for (c = pool->constants[i]; c; c = next)
      {
        next = c->next;
        free (c);
      }

  for (c = pool->execute; c; c = next)
    {
      next = c->next;
      free (c);
    }

  BITMAP_FREE (pool->insns);
  free (pool);
}
/* Collect main literal pool.  Return NULL on overflow.  */

static struct constant_pool *
s390_mainpool_start (void)
{
  struct constant_pool *pool;
  rtx_insn *insn;

  pool = s390_alloc_pool ();

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
        {
          /* There might be two main_pool instructions if base_reg
             is call-clobbered; one for shrink-wrapped code and one
             for the rest.  We want to keep the first.  */
          if (pool->pool_insn)
            {
              insn = PREV_INSN (insn);
              delete_insn (NEXT_INSN (insn));
              continue;
            }
          pool->pool_insn = insn;
        }

      if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
        {
          s390_add_execute (pool, insn);
        }
      else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              rtx constant = get_pool_constant (pool_ref);
              machine_mode mode = get_pool_mode (pool_ref);
              s390_add_constant (pool, constant, mode);
            }
        }

      /* If hot/cold partitioning is enabled we have to make sure that
         the literal pool is emitted in the same section where the
         initialization of the literal pool base pointer takes place.
         emit_pool_after is only used in the non-overflow case on non
         Z cpus where we can emit the literal pool at the end of the
         function body within the text section.  */
      if (NOTE_P (insn)
          && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
          && !pool->emit_pool_after)
        pool->emit_pool_after = PREV_INSN (insn);
    }

  gcc_assert (pool->pool_insn || pool->size == 0);

  if (pool->size >= 4096)
    {
      /* We're going to chunkify the pool, so remove the main
         pool placeholder insn.  */
      remove_insn (pool->pool_insn);

      s390_free_pool (pool);
      pool = NULL;
    }

  /* If the function ends with the section where the literal pool
     should be emitted set the marker to its end.  */
  if (pool && !pool->emit_pool_after)
    pool->emit_pool_after = get_last_insn ();

  return pool;
}
/* POOL holds the main literal pool as collected by s390_mainpool_start.
   Modify the current function to output the pool constants as well as
   the pool register setup instruction.  */

static void
s390_mainpool_finish (struct constant_pool *pool)
{
  rtx base_reg = cfun->machine->base_reg;

  /* If the pool is empty, we're done.  */
  if (pool->size == 0)
    {
      /* We don't actually need a base register after all.  */
      cfun->machine->base_reg = NULL_RTX;

      if (pool->pool_insn)
        remove_insn (pool->pool_insn);
      s390_free_pool (pool);
      return;
    }

  /* We need correct insn addresses.  */
  shorten_branches (get_insns ());

  /* On zSeries, we use a LARL to load the pool register.  The pool is
     located in the .rodata section, so we emit it after the function.  */
  if (TARGET_CPU_ZARCH)
    {
      rtx set = gen_main_base_64 (base_reg, pool->label);
      rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
      INSN_ADDRESSES_NEW (insn, -1);
      remove_insn (pool->pool_insn);

      insn = get_last_insn ();
      pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
      INSN_ADDRESSES_NEW (pool->pool_insn, -1);

      s390_dump_pool (pool, 0);
    }

  /* On S/390, if the total size of the function's code plus literal pool
     does not exceed 4096 bytes, we use BASR to set up a function base
     pointer, and emit the literal pool at the end of the function.  */
  else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
           + pool->size + 8 /* alignment slop */ < 4096)
    {
      rtx set = gen_main_base_31_small (base_reg, pool->label);
      rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
      INSN_ADDRESSES_NEW (insn, -1);
      remove_insn (pool->pool_insn);

      insn = emit_label_after (pool->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);

      /* emit_pool_after will be set by s390_mainpool_start to the
         last insn of the section where the literal pool should be
         emitted.  */
      insn = pool->emit_pool_after;

      pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
      INSN_ADDRESSES_NEW (pool->pool_insn, -1);

      s390_dump_pool (pool, 1);
    }

  /* Otherwise, we emit an inline literal pool and use BASR to branch
     over it, setting up the pool register at the same time.  */
  else
    {
      rtx_code_label *pool_end = gen_label_rtx ();

      rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
      rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
      JUMP_LABEL (insn) = pool_end;
      INSN_ADDRESSES_NEW (insn, -1);
      remove_insn (pool->pool_insn);

      insn = emit_label_after (pool->label, insn);
      INSN_ADDRESSES_NEW (insn, -1);

      pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
      INSN_ADDRESSES_NEW (pool->pool_insn, -1);

      insn = emit_label_after (pool_end, pool->pool_insn);
      INSN_ADDRESSES_NEW (insn, -1);

      s390_dump_pool (pool, 1);
    }

  /* Replace all literal pool references.  */

  for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        replace_ltrel_base (&PATTERN (insn));

      if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx addr, pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              if (s390_execute_label (insn))
                addr = s390_find_execute (pool, insn);
              else
                addr = s390_find_constant (pool, get_pool_constant (pool_ref),
                                           get_pool_mode (pool_ref));

              replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
              INSN_CODE (insn) = -1;
            }
        }
    }

  /* Free the pool.  */
  s390_free_pool (pool);
}
/* POOL holds the main literal pool as collected by s390_mainpool_start.
   We have decided we cannot use this pool, so revert all changes
   to the current function that were done by s390_mainpool_start.  */

static void
s390_mainpool_cancel (struct constant_pool *pool)
{
  /* We didn't actually change the instruction stream, so simply
     free the pool memory.  */
  s390_free_pool (pool);
}
6870 /* Chunkify the literal pool. */
6872 #define S390_POOL_CHUNK_MIN 0xc00
6873 #define S390_POOL_CHUNK_MAX 0xe00
6875 static struct constant_pool
*
6876 s390_chunkify_start (void)
6878 struct constant_pool
*curr_pool
= NULL
, *pool_list
= NULL
;
6881 rtx pending_ltrel
= NULL_RTX
;
6884 rtx (*gen_reload_base
) (rtx
, rtx
) =
6885 TARGET_CPU_ZARCH
? gen_reload_base_64
: gen_reload_base_31
;
6888 /* We need correct insn addresses. */
6890 shorten_branches (get_insns ());
  /* Scan all insns and move literals to pool chunks.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      bool section_switch_p = false;

      /* Check for pending LTREL_BASE.  */
      if (INSN_P (insn))
        {
          rtx ltrel_base = find_ltrel_base (PATTERN (insn));
          if (ltrel_base)
            {
              gcc_assert (ltrel_base == pending_ltrel);
              pending_ltrel = NULL_RTX;
            }
        }

      if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
        {
          if (!curr_pool)
            curr_pool = s390_start_pool (&pool_list, insn);

          s390_add_execute (curr_pool, insn);
          s390_add_pool_insn (curr_pool, insn);
        }
      else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              rtx constant = get_pool_constant (pool_ref);
              machine_mode mode = get_pool_mode (pool_ref);

              if (!curr_pool)
                curr_pool = s390_start_pool (&pool_list, insn);

              s390_add_constant (curr_pool, constant, mode);
              s390_add_pool_insn (curr_pool, insn);

              /* Don't split the pool chunk between a LTREL_OFFSET load
                 and the corresponding LTREL_BASE.  */
              if (GET_CODE (constant) == CONST
                  && GET_CODE (XEXP (constant, 0)) == UNSPEC
                  && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
                {
                  gcc_assert (!pending_ltrel);
                  pending_ltrel = pool_ref;
                }
            }
        }

      if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
        {
          if (curr_pool)
            s390_add_pool_insn (curr_pool, insn);
          /* An LTREL_BASE must follow within the same basic block.  */
          gcc_assert (!pending_ltrel);
        }

      if (NOTE_P (insn))
        switch (NOTE_KIND (insn))
          {
          case NOTE_INSN_SWITCH_TEXT_SECTIONS:
            section_switch_p = true;
            break;
          case NOTE_INSN_VAR_LOCATION:
          case NOTE_INSN_CALL_ARG_LOCATION:
            continue;
          default:
            break;
          }

      if (!curr_pool
          || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
          || INSN_ADDRESSES (INSN_UID (insn)) == -1)
        continue;

      if (TARGET_CPU_ZARCH)
        {
          if (curr_pool->size < S390_POOL_CHUNK_MAX)
            continue;

          s390_end_pool (curr_pool, NULL);
          curr_pool = NULL;
        }
      else
        {
          int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
                           - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
                           + extra_size;

          /* We will later have to insert base register reload insns.
             Those will have an effect on code size, which we need to
             consider here.  This calculation makes rather pessimistic
             worst-case assumptions.  */
          if (LABEL_P (insn))
            extra_size += 6;

          if (chunk_size < S390_POOL_CHUNK_MIN
              && curr_pool->size < S390_POOL_CHUNK_MIN
              && !section_switch_p)
            continue;

          /* Pool chunks can only be inserted after BARRIERs ...  */
          if (BARRIER_P (insn))
            {
              s390_end_pool (curr_pool, insn);
              curr_pool = NULL;
              extra_size = 0;
            }

          /* ... so if we don't find one in time, create one.  */
          else if (chunk_size > S390_POOL_CHUNK_MAX
                   || curr_pool->size > S390_POOL_CHUNK_MAX
                   || section_switch_p)
            {
              rtx_insn *label, *jump, *barrier, *next, *prev;

              if (!section_switch_p)
                {
                  /* We can insert the barrier only after a 'real' insn.  */
                  if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
                    continue;
                  if (get_attr_length (insn) == 0)
                    continue;
                  /* Don't separate LTREL_BASE from the corresponding
                     LTREL_OFFSET load.  */
                  if (pending_ltrel)
                    continue;
                  next = NEXT_INSN (insn);
                  while (next
                         && NOTE_P (next)
                         && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
                             || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION))
                    next = NEXT_INSN (next);
                }
              else
                {
                  gcc_assert (!pending_ltrel);

                  /* The old pool has to end before the section switch
                     note in order to make it part of the current
                     section.  */
                  insn = PREV_INSN (insn);
                }

              label = gen_label_rtx ();
              prev = insn;
              if (prev && NOTE_P (prev))
                prev = prev_nonnote_insn (prev);
              if (prev)
                jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
                                                    INSN_LOCATION (prev));
              else
                jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
              barrier = emit_barrier_after (jump);
              insn = emit_label_after (label, barrier);
              JUMP_LABEL (jump) = label;
              LABEL_NUSES (label) = 1;

              INSN_ADDRESSES_NEW (jump, -1);
              INSN_ADDRESSES_NEW (barrier, -1);
              INSN_ADDRESSES_NEW (insn, -1);

              s390_end_pool (curr_pool, barrier);
              curr_pool = NULL;
              extra_size = 0;
            }
        }
    }

  if (curr_pool)
    s390_end_pool (curr_pool, NULL);
  gcc_assert (!pending_ltrel);
  /* Find all labels that are branched into
     from an insn belonging to a different chunk.  */

  far_labels = BITMAP_ALLOC (NULL);

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      rtx_jump_table_data *table;

      /* Labels marked with LABEL_PRESERVE_P can be target
         of non-local jumps, so we have to mark them.
         The same holds for named labels.

         Don't do that, however, if it is the label before
         a jump table.  */

      if (LABEL_P (insn)
          && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
        {
          rtx_insn *vec_insn = NEXT_INSN (insn);
          if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
            bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
        }
      /* Check potential targets in a table jump (casesi_jump).  */
      else if (tablejump_p (insn, NULL, &table))
        {
          rtx vec_pat = PATTERN (table);
          int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;

          for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
            {
              rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);

              if (s390_find_pool (pool_list, label)
                  != s390_find_pool (pool_list, insn))
                bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
            }
        }
      /* If we have a direct jump (conditional or unconditional),
         check all potential targets.  */
      else if (JUMP_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            pat = XVECEXP (pat, 0, 0);

          if (GET_CODE (pat) == SET)
            {
              rtx label = JUMP_LABEL (insn);
              if (label && !ANY_RETURN_P (label))
                {
                  if (s390_find_pool (pool_list, label)
                      != s390_find_pool (pool_list, insn))
                    bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
                }
            }
        }
    }
  /* Insert base register reload insns before every pool.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    {
      rtx new_insn = gen_reload_base (cfun->machine->base_reg,
                                      curr_pool->label);
      rtx_insn *insn = curr_pool->first_insn;
      INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
    }

  /* Insert base register reload insns at every far label.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (LABEL_P (insn)
        && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
      {
        struct constant_pool *pool = s390_find_pool (pool_list, insn);
        if (pool)
          {
            rtx new_insn = gen_reload_base (cfun->machine->base_reg,
                                            pool->label);
            INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
          }
      }

  BITMAP_FREE (far_labels);

  /* Recompute insn addresses.  */

  init_insn_lengths ();
  shorten_branches (get_insns ());

  return pool_list;
}
/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
   After we have decided to use this list, finish implementing
   all changes to the current function as required.  */

static void
s390_chunkify_finish (struct constant_pool *pool_list)
{
  struct constant_pool *curr_pool = NULL;
  rtx_insn *insn;

  /* Replace all literal pool references.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        replace_ltrel_base (&PATTERN (insn));

      curr_pool = s390_find_pool (pool_list, insn);
      if (!curr_pool)
        continue;

      if (NONJUMP_INSN_P (insn) || CALL_P (insn))
        {
          rtx addr, pool_ref = NULL_RTX;
          find_constant_pool_ref (PATTERN (insn), &pool_ref);
          if (pool_ref)
            {
              if (s390_execute_label (insn))
                addr = s390_find_execute (curr_pool, insn);
              else
                addr = s390_find_constant (curr_pool,
                                           get_pool_constant (pool_ref),
                                           get_pool_mode (pool_ref));

              replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
              INSN_CODE (insn) = -1;
            }
        }
    }

  /* Dump out all literal pools.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    s390_dump_pool (curr_pool, 0);

  /* Free pool list.  */

  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
}
/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
   We have decided we cannot use this list, so revert all changes
   to the current function that were done by s390_chunkify_start.  */

static void
s390_chunkify_cancel (struct constant_pool *pool_list)
{
  struct constant_pool *curr_pool = NULL;
  rtx_insn *insn;

  /* Remove all pool placeholder insns.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    {
      /* Did we insert an extra barrier?  Remove it.  */
      rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
      rtx_insn *jump = barrier ? PREV_INSN (barrier) : NULL;
      rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);

      if (jump && JUMP_P (jump)
          && barrier && BARRIER_P (barrier)
          && label && LABEL_P (label)
          && GET_CODE (PATTERN (jump)) == SET
          && SET_DEST (PATTERN (jump)) == pc_rtx
          && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
          && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
        {
          remove_insn (jump);
          remove_insn (barrier);
          remove_insn (label);
        }

      remove_insn (curr_pool->pool_insn);
    }

  /* Remove all base register reload insns.  */

  for (insn = get_insns (); insn; )
    {
      rtx_insn *next_insn = NEXT_INSN (insn);

      if (NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
        remove_insn (insn);

      insn = next_insn;
    }

  /* Free pool list.  */

  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
}
/* Output the constant pool entry EXP in mode MODE with alignment ALIGN.  */

void
s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
{
  REAL_VALUE_TYPE r;

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
      gcc_assert (GET_CODE (exp) == CONST_DOUBLE);

      REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
      assemble_real (r, mode, align);
      break;

    case MODE_INT:
      assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
      mark_symbol_refs_as_used (exp);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Return an RTL expression representing the value of the return address
   for the frame COUNT steps up from the current frame.  FRAME is the
   frame pointer of that frame.  */

rtx
s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
{
  int offset;
  rtx addr;

  /* Without backchain, we fail for all but the current frame.  */

  if (!TARGET_BACKCHAIN && count > 0)
    return NULL_RTX;

  /* For the current frame, we need to make sure the initial
     value of RETURN_REGNUM is actually saved.  */

  if (count == 0)
    {
      /* On non-z architectures branch splitting could overwrite r14.  */
      if (TARGET_CPU_ZARCH)
        return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
      else
        {
          cfun_frame_layout.save_return_addr_p = true;
          return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
        }
    }

  if (TARGET_PACKED_STACK)
    offset = -2 * UNITS_PER_LONG;
  else
    offset = RETURN_REGNUM * UNITS_PER_LONG;

  addr = plus_constant (Pmode, frame, offset);
  addr = memory_address (Pmode, addr);
  return gen_rtx_MEM (Pmode, addr);
}
/* Return an RTL expression representing the back chain stored in
   the current stack frame.  */

rtx
s390_back_chain_rtx (void)
{
  rtx chain;

  gcc_assert (TARGET_BACKCHAIN);

  if (TARGET_PACKED_STACK)
    chain = plus_constant (Pmode, stack_pointer_rtx,
                           STACK_POINTER_OFFSET - UNITS_PER_LONG);
  else
    chain = stack_pointer_rtx;

  chain = gen_rtx_MEM (Pmode, chain);
  return chain;
}
/* Find first call clobbered register unused in a function.
   This could be used as base register in a leaf function
   or for holding the return address before epilogue.  */

static int
find_unused_clobbered_reg (void)
{
  int i;
  for (i = 0; i < 6; i++)
    if (!df_regs_ever_live_p (i))
      return i;
  return 0;
}
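
/* Note that a return value of 0 is ambiguous: r0 is also what gets
   returned when all of r0-r5 are live.  The caller in
   s390_emit_epilogue below therefore treats 0 as "nothing found" and
   falls back to r4.  */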
/* Helper function for s390_regs_ever_clobbered.  Sets the fields in DATA for all
   clobbered hard regs in SETREG.  */

static void
s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
{
  char *regs_ever_clobbered = (char *)data;
  unsigned int i, regno;
  machine_mode mode = GET_MODE (setreg);

  if (GET_CODE (setreg) == SUBREG)
    {
      rtx inner = SUBREG_REG (setreg);
      if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
        return;
      regno = subreg_regno (setreg);
    }
  else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
    regno = REGNO (setreg);
  else
    return;

  for (i = regno;
       i < regno + HARD_REGNO_NREGS (regno, mode);
       i++)
    regs_ever_clobbered[i] = 1;
}
/* Walks through all basic blocks of the current function looking
   for clobbered hard regs using s390_reg_clobbered_rtx.  The fields
   of the passed integer array REGS_EVER_CLOBBERED are set to one for
   each of those regs.  */

static void
s390_regs_ever_clobbered (char regs_ever_clobbered[])
{
  basic_block cur_bb;
  rtx_insn *cur_insn;
  unsigned int i;

  memset (regs_ever_clobbered, 0, 32);

  /* For non-leaf functions we have to consider all call clobbered regs to be
     clobbered.  */
  if (!crtl->is_leaf)
    {
      for (i = 0; i < 32; i++)
        regs_ever_clobbered[i] = call_really_used_regs[i];
    }

  /* Make the "magic" eh_return registers live if necessary.  For regs_ever_live
     this work is done by liveness analysis (mark_regs_live_at_end).
     Special care is needed for functions containing landing pads.  Landing pads
     may use the eh registers, but the code which sets these registers is not
     contained in that function.  Hence s390_regs_ever_clobbered is not able to
     deal with this automatically.  */
  if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
      if (crtl->calls_eh_return
          || (cfun->machine->has_landing_pad_p
              && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
        regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;

  /* For nonlocal gotos all call-saved registers have to be saved.
     This flag is also set for the unwinding code in libgcc.
     See expand_builtin_unwind_init.  For regs_ever_live this is done by
     reload.  */
  if (crtl->saves_all_registers)
    for (i = 0; i < 32; i++)
      if (!call_really_used_regs[i])
        regs_ever_clobbered[i] = 1;

  FOR_EACH_BB_FN (cur_bb, cfun)
    {
      FOR_BB_INSNS (cur_bb, cur_insn)
        {
          rtx pat;

          if (!INSN_P (cur_insn))
            continue;

          pat = PATTERN (cur_insn);

          /* Ignore GPR restore insns.  */
          if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
            {
              if (GET_CODE (pat) == SET
                  && GENERAL_REG_P (SET_DEST (pat)))
                {
                  /* lgdr  */
                  if (GET_MODE (SET_SRC (pat)) == DImode
                      && FP_REG_P (SET_SRC (pat)))
                    continue;

                  /* l / lg  */
                  if (GET_CODE (SET_SRC (pat)) == MEM)
                    continue;
                }

              /* lm / lmg */
              if (GET_CODE (pat) == PARALLEL
                  && load_multiple_operation (pat, VOIDmode))
                continue;
            }

          note_stores (pat,
                       s390_reg_clobbered_rtx,
                       regs_ever_clobbered);
        }
    }
}
/* Determine the frame area which actually has to be accessed
   in the function epilogue.  The values are stored at the
   given pointers AREA_BOTTOM (address of the lowest used stack
   address) and AREA_TOP (address of the first item which does
   not belong to the stack frame).  */

static void
s390_frame_area (int *area_bottom, int *area_top)
{
  int b, t;

  b = INT_MAX;
  t = INT_MIN;

  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      b = (cfun_frame_layout.gprs_offset
           + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
      t = b + (cfun_frame_layout.last_restore_gpr
               - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
    }

  if (TARGET_64BIT && cfun_save_high_fprs_p)
    {
      b = MIN (b, cfun_frame_layout.f8_offset);
      t = MAX (t, (cfun_frame_layout.f8_offset
                   + cfun_frame_layout.high_fprs * 8));
    }

  if (!TARGET_64BIT)
    {
      if (cfun_fpr_save_p (FPR4_REGNUM))
        {
          b = MIN (b, cfun_frame_layout.f4_offset);
          t = MAX (t, cfun_frame_layout.f4_offset + 8);
        }
      if (cfun_fpr_save_p (FPR6_REGNUM))
        {
          b = MIN (b, cfun_frame_layout.f4_offset + 8);
          t = MAX (t, cfun_frame_layout.f4_offset + 16);
        }
    }
  *area_bottom = b;
  *area_top = t;
}
/* Update gpr_save_slots in the frame layout trying to make use of
   FPRs as GPR save slots.
   This is a helper routine of s390_register_info.  */

static void
s390_register_info_gprtofpr ()
{
  int save_reg_slot = FPR0_REGNUM;
  int i, j;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  for (i = 15; i >= 6; i--)
    {
      if (cfun_gpr_save_slot (i) == 0)
        continue;

      /* Advance to the next FP register which can be used as a
         GPR save slot.  */
      while ((!call_really_used_regs[save_reg_slot]
              || df_regs_ever_live_p (save_reg_slot)
              || cfun_fpr_save_p (save_reg_slot))
             && FP_REGNO_P (save_reg_slot))
        save_reg_slot++;
      if (!FP_REGNO_P (save_reg_slot))
        {
          /* We only want to use ldgr/lgdr if we can get rid of
             stm/lm entirely.  So undo the gpr slot allocation in
             case we ran out of FPR save slots.  */
          for (j = 6; j <= 15; j++)
            if (FP_REGNO_P (cfun_gpr_save_slot (j)))
              cfun_gpr_save_slot (j) = -1;
          break;
        }
      cfun_gpr_save_slot (i) = save_reg_slot++;
    }
}
/* Set the bits in fpr_bitmap for FPRs which need to be saved due to
   stdarg.
   This is a helper routine for s390_register_info.  */

static void
s390_register_info_stdarg_fpr ()
{
  int i;
  int min_fpr;
  int max_fpr;

  /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
     f0-f4 for 64 bit.  */
  if (!cfun->stdarg
      || !TARGET_HARD_FLOAT
      || !cfun->va_list_fpr_size
      || crtl->args.info.fprs >= FP_ARG_NUM_REG)
    return;

  min_fpr = crtl->args.info.fprs;
  max_fpr = min_fpr + cfun->va_list_fpr_size;
  if (max_fpr > FP_ARG_NUM_REG)
    max_fpr = FP_ARG_NUM_REG;

  for (i = min_fpr; i < max_fpr; i++)
    cfun_set_fpr_save (i + FPR0_REGNUM);
}
/* Reserve the GPR save slots for GPRs which need to be saved due to
   stdarg.
   This is a helper routine for s390_register_info.  */

static void
s390_register_info_stdarg_gpr ()
{
  int i;
  int min_gpr;
  int max_gpr;

  if (!cfun->stdarg
      || !cfun->va_list_gpr_size
      || crtl->args.info.gprs >= GP_ARG_NUM_REG)
    return;

  min_gpr = crtl->args.info.gprs;
  max_gpr = min_gpr + cfun->va_list_gpr_size;
  if (max_gpr > GP_ARG_NUM_REG)
    max_gpr = GP_ARG_NUM_REG;

  for (i = min_gpr; i < max_gpr; i++)
    cfun_gpr_save_slot (2 + i) = -1;
}
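
/* A worked example (illustrative, not from the original sources): a
   stdarg function whose two named arguments already consumed r2 and r3
   has crtl->args.info.gprs == 2; with a sufficiently large
   va_list_gpr_size the loop above reserves stack slots for 2 + i with
   i = 2 .. GP_ARG_NUM_REG - 1 (GP_ARG_NUM_REG being 5), i.e. exactly
   the remaining argument registers r4, r5 and r6.  */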
/* The GPR and FPR save slots in cfun->machine->frame_layout are set
   for registers which need to be saved in function prologue.
   This function can be used until the insns emitted for save/restore
   of the regs are visible in the RTL stream.  */

static void
s390_register_info ()
{
  int i, j;
  char clobbered_regs[32];

  gcc_assert (!epilogue_completed);

  if (reload_completed)
    /* After reload we rely on our own routine to determine which
       registers need saving.  */
    s390_regs_ever_clobbered (clobbered_regs);
  else
    /* During reload we use regs_ever_live as a base since reload
       does changes in there which we otherwise would not be aware
       of.  */
    for (i = 0; i < 32; i++)
      clobbered_regs[i] = df_regs_ever_live_p (i);

  for (i = 0; i < 32; i++)
    clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];

  /* Mark the call-saved FPRs which need to be saved.
     This needs to be done before checking the special GPRs since the
     stack pointer usage depends on whether high FPRs have to be saved
     or not.  */
  cfun_frame_layout.fpr_bitmap = 0;
  cfun_frame_layout.high_fprs = 0;
  for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
    if (clobbered_regs[i] && !call_really_used_regs[i])
      {
        cfun_set_fpr_save (i);
        if (i >= FPR8_REGNUM)
          cfun_frame_layout.high_fprs++;
      }

  if (flag_pic)
    clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
      |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);

  clobbered_regs[BASE_REGNUM]
    |= (cfun->machine->base_reg
        && REGNO (cfun->machine->base_reg) == BASE_REGNUM);

  clobbered_regs[HARD_FRAME_POINTER_REGNUM]
    |= !!frame_pointer_needed;

  /* On pre z900 machines this might take until machine dependent
     reorg to decide.
     save_return_addr_p will only be set on non-zarch machines so
     there is no risk that r14 goes into an FPR instead of a stack
     slot.  */
  clobbered_regs[RETURN_REGNUM]
    |= (!crtl->is_leaf
        || TARGET_TPF_PROFILING
        || cfun->machine->split_branches_pending_p
        || cfun_frame_layout.save_return_addr_p
        || crtl->calls_eh_return);

  clobbered_regs[STACK_POINTER_REGNUM]
    |= (!crtl->is_leaf
        || TARGET_TPF_PROFILING
        || cfun_save_high_fprs_p
        || get_frame_size () > 0
        || (reload_completed && cfun_frame_layout.frame_size > 0)
        || cfun->calls_alloca);

  memset (cfun_frame_layout.gpr_save_slots, 0, 16);

  for (i = 6; i < 16; i++)
    if (clobbered_regs[i])
      cfun_gpr_save_slot (i) = -1;

  s390_register_info_stdarg_fpr ();
  s390_register_info_gprtofpr ();

  /* First find the range of GPRs to be restored.  Vararg regs don't
     need to be restored so we do it before assigning slots to the
     vararg GPRs.  */
  for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
  for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
  cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
  cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;

  /* stdarg functions might need to save GPRs 2 to 6.  This might
     override the GPR->FPR save decision made above for r6 since
     vararg regs must go to the stack.  */
  s390_register_info_stdarg_gpr ();

  /* Now the range of GPRs which need saving.  */
  for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
  for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
  cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
  cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
}
/* This function is called by s390_optimize_prologue in order to get
   rid of unnecessary GPR save/restore instructions.  The register info
   for the GPRs is re-computed and the ranges are re-calculated.  */

static void
s390_optimize_register_info ()
{
  char clobbered_regs[32];
  int i, j;

  gcc_assert (epilogue_completed);
  gcc_assert (!cfun->machine->split_branches_pending_p);

  s390_regs_ever_clobbered (clobbered_regs);

  for (i = 0; i < 32; i++)
    clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];

  /* There is still special treatment needed for cases invisible to
     s390_regs_ever_clobbered.  */
  clobbered_regs[RETURN_REGNUM]
    |= (TARGET_TPF_PROFILING
        /* When expanding builtin_return_addr in ESA mode we do not
           know whether r14 will later be needed as scratch reg when
           doing branch splitting.  So the builtin always accesses the
           r14 save slot and we need to stick to the save/restore
           decision for r14 even if it turns out that it didn't get
           clobbered.  */
        || cfun_frame_layout.save_return_addr_p
        || crtl->calls_eh_return);

  memset (cfun_frame_layout.gpr_save_slots, 0, 6);

  for (i = 6; i < 16; i++)
    if (!clobbered_regs[i])
      cfun_gpr_save_slot (i) = 0;

  for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
  for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
  cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
  cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;

  s390_register_info_stdarg_gpr ();

  for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
  for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
  cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
  cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
}
/* Fill cfun->machine with info about frame of current function.  */

static void
s390_frame_info (void)
{
  HOST_WIDE_INT lowest_offset;

  cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
  cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;

  /* The va_arg builtin uses a constant distance of 16 *
     UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
     pointer.  So even if we are going to save the stack pointer in an
     FPR we need the stack space in order to keep the offsets
     correct.  */
  if (cfun->stdarg && cfun_save_arg_fprs_p)
    {
      cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;

      if (cfun_frame_layout.first_save_gpr_slot == -1)
        cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
    }

  cfun_frame_layout.frame_size = get_frame_size ();
  if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
    fatal_error ("total size of local variables exceeds architecture limit");

  if (!TARGET_PACKED_STACK)
    {
      /* Fixed stack layout.  */
      cfun_frame_layout.backchain_offset = 0;
      cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
      cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
      cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
      cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
                                       * UNITS_PER_LONG);
    }
  else if (TARGET_BACKCHAIN)
    {
      /* Kernel stack layout - packed stack, backchain, no float  */
      gcc_assert (TARGET_SOFT_FLOAT);
      cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
                                            - UNITS_PER_LONG);

      /* The distance between the backchain and the return address
         save slot must not change.  So we always need a slot for the
         stack pointer which resides in between.  */
      cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;

      cfun_frame_layout.gprs_offset
        = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;

      /* FPRs will not be saved.  Nevertheless pick sane values to
         keep area calculations valid.  */
      cfun_frame_layout.f0_offset =
        cfun_frame_layout.f4_offset =
        cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
    }
  else
    {
      int num_fprs;

      /* Packed stack layout without backchain.  */

      /* With stdarg FPRs need their dedicated slots.  */
      num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
                  : (cfun_fpr_save_p (FPR4_REGNUM) +
                     cfun_fpr_save_p (FPR6_REGNUM)));
      cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;

      num_fprs = (cfun->stdarg ? 2
                  : (cfun_fpr_save_p (FPR0_REGNUM)
                     + cfun_fpr_save_p (FPR2_REGNUM)));
      cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;

      cfun_frame_layout.gprs_offset
        = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;

      cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
                                     - cfun_frame_layout.high_fprs * 8);
    }

  if (cfun_save_high_fprs_p)
    cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;

  if (!crtl->is_leaf)
    cfun_frame_layout.frame_size += crtl->outgoing_args_size;

  /* In the following cases we have to allocate a STACK_POINTER_OFFSET
     sized area at the bottom of the stack.  This is required also for
     leaf functions.  When GCC generates a local stack reference it
     will always add STACK_POINTER_OFFSET to all these references.  */
  if (crtl->is_leaf
      && !TARGET_TPF_PROFILING
      && cfun_frame_layout.frame_size == 0
      && !cfun->calls_alloca)
    return;

  /* Calculate the number of bytes we have used in our own register
     save area.  With the packed stack layout we can re-use the
     remaining bytes for normal stack elements.  */

  if (TARGET_PACKED_STACK)
    lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
                              cfun_frame_layout.f4_offset),
                         cfun_frame_layout.gprs_offset);
  else
    lowest_offset = 0;

  if (TARGET_BACKCHAIN)
    lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);

  cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;

  /* If under 31 bit an odd number of gprs has to be saved we have to
     adjust the frame size to sustain 8 byte alignment of stack
     frames.  */
  cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
                                   STACK_BOUNDARY / BITS_PER_UNIT - 1)
                                  & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
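
/* A quick worked example of the rounding above: STACK_BOUNDARY /
   BITS_PER_UNIT is 8 on this target, so a raw frame_size of 92 bytes
   becomes (92 + 7) & ~7 == 96, restoring 8-byte stack alignment after
   an odd number of 4-byte GPR save slots under 31 bit.  */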
/* Generate frame layout.  Fills in register and frame data for the current
   function in cfun->machine.  This routine can be called multiple times;
   it will re-do the complete frame layout every time.  */

static void
s390_init_frame_layout (void)
{
  HOST_WIDE_INT frame_size;
  bool base_used;

  gcc_assert (!reload_completed);

  /* On S/390 machines, we may need to perform branch splitting, which
     will require both base and return address register.  We have no
     choice but to assume we're going to need them until right at the
     end of the machine dependent reorg phase.  */
  if (!TARGET_CPU_ZARCH)
    cfun->machine->split_branches_pending_p = true;

  do
    {
      frame_size = cfun_frame_layout.frame_size;

      /* Try to predict whether we'll need the base register.  */
      base_used = cfun->machine->split_branches_pending_p
                  || crtl->uses_const_pool
                  || (!DISP_IN_RANGE (frame_size)
                      && !CONST_OK_FOR_K (frame_size));

      /* Decide which register to use as literal pool base.  In small
         leaf functions, try to use an unused call-clobbered register
         as base register to avoid save/restore overhead.  */
      if (!base_used)
        cfun->machine->base_reg = NULL_RTX;
      else if (crtl->is_leaf && !df_regs_ever_live_p (5))
        cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
      else
        cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);

      s390_register_info ();
      s390_frame_info ();
    }
  while (frame_size != cfun_frame_layout.frame_size);
}
/* Remove the FPR clobbers from a tbegin insn if it can be proven that
   the TX is nonescaping.  A transaction is considered escaping if
   there is at least one path from tbegin returning CC0 to the
   function exit block without an tend.

   The check so far has some limitations:
   - only single tbegin/tend BBs are supported
   - the first cond jump after tbegin must separate the CC0 path from ~CC0
   - when CC is copied to a GPR and the CC0 check is done with the GPR
     this is not supported
*/

static void
s390_optimize_nonescaping_tx (void)
{
  const unsigned int CC0 = 1 << 3;
  basic_block tbegin_bb = NULL;
  basic_block tend_bb = NULL;
  basic_block bb;
  rtx_insn *insn;
  bool result = true;
  int bb_index;
  rtx_insn *tbegin_insn = NULL;

  if (!cfun->machine->tbegin_p)
    return;

  for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);

      if (!bb)
        continue;

      FOR_BB_INSNS (bb, insn)
        {
          rtx ite, cc, pat, target;
          unsigned HOST_WIDE_INT mask;

          if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
            continue;

          pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            pat = XVECEXP (pat, 0, 0);

          if (GET_CODE (pat) != SET
              || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
            continue;

          if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
            {
              rtx_insn *tmp;

              tbegin_insn = insn;

              /* Just return if the tbegin doesn't have clobbers.  */
              if (GET_CODE (PATTERN (insn)) != PARALLEL)
                return;

              if (tbegin_bb != NULL)
                return;

              /* Find the next conditional jump.  */
              for (tmp = NEXT_INSN (insn);
                   tmp != NULL_RTX;
                   tmp = NEXT_INSN (tmp))
                {
                  if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
                    return;
                  if (!JUMP_P (tmp))
                    continue;

                  ite = SET_SRC (PATTERN (tmp));
                  if (GET_CODE (ite) != IF_THEN_ELSE)
                    return;

                  cc = XEXP (XEXP (ite, 0), 0);
                  if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
                      || GET_MODE (cc) != CCRAWmode
                      || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
                    return;

                  if (bb->succs->length () != 2)
                    return;

                  mask = INTVAL (XEXP (XEXP (ite, 0), 1));
                  if (GET_CODE (XEXP (ite, 0)) == NE)
                    mask ^= 0xf;

                  if (mask == CC0)
                    target = XEXP (ite, 1);
                  else if (mask == (CC0 ^ 0xf))
                    target = XEXP (ite, 2);
                  else
                    return;

                  {
                    edge_iterator ei;
                    edge e1, e2;

                    ei = ei_start (bb->succs);
                    e1 = ei_safe_edge (ei);
                    ei_next (&ei);
                    e2 = ei_safe_edge (ei);

                    if (e2->flags & EDGE_FALLTHRU)
                      {
                        e2 = e1;
                        e1 = ei_safe_edge (ei);
                      }

                    if (!(e1->flags & EDGE_FALLTHRU))
                      return;

                    tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
                  }
                  if (tmp == BB_END (bb))
                    break;
                }
            }

          if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
            {
              if (tend_bb != NULL)
                return;
              tend_bb = bb;
            }
        }
    }

  /* Either we successfully remove the FPR clobbers here or we are not
     able to do anything for this TX.  Both cases don't qualify for
     another look.  */
  cfun->machine->tbegin_p = false;

  if (tbegin_bb == NULL || tend_bb == NULL)
    return;

  calculate_dominance_info (CDI_POST_DOMINATORS);
  result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
  free_dominance_info (CDI_POST_DOMINATORS);

  if (!result)
    return;

  PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
                            gen_rtvec (2,
                                       XVECEXP (PATTERN (tbegin_insn), 0, 0),
                                       XVECEXP (PATTERN (tbegin_insn), 0, 1)));
  INSN_CODE (tbegin_insn) = -1;
  df_insn_rescan (tbegin_insn);
}
/* Return true if it is legal to put a value with MODE into REGNO.  */

bool
s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  switch (REGNO_REG_CLASS (regno))
    {
    case FP_REGS:
      if (REGNO_PAIR_OK (regno, mode))
        {
          if (mode == SImode || mode == DImode)
            return true;

          if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
            return true;
        }
      break;
    case ADDR_REGS:
      if (FRAME_REGNO_P (regno) && mode == Pmode)
        return true;

      /* fallthrough */
    case GENERAL_REGS:
      if (REGNO_PAIR_OK (regno, mode))
        {
          if (TARGET_ZARCH
              || (mode != TFmode && mode != TCmode && mode != TDmode))
            return true;
        }
      break;
    case CC_REGS:
      if (GET_MODE_CLASS (mode) == MODE_CC)
        return true;
      break;
    case ACCESS_REGS:
      if (REGNO_PAIR_OK (regno, mode))
        {
          if (mode == SImode || mode == Pmode)
            return true;
        }
      break;
    default:
      return false;
    }

  return false;
}
/* Return nonzero if register OLD_REG can be renamed to register NEW_REG.  */

bool
s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
{
  /* Once we've decided upon a register to use as base register, it must
     no longer be used for any other purpose.  */
  if (cfun->machine->base_reg)
    if (REGNO (cfun->machine->base_reg) == old_reg
        || REGNO (cfun->machine->base_reg) == new_reg)
      return false;

  /* Prevent regrename from using call-saved regs which haven't
     actually been saved.  This is necessary since regrename assumes
     the backend save/restore decisions are based on
     df_regs_ever_live.  Since we have our own routine we have to tell
     regrename manually about it.  */
  if (GENERAL_REGNO_P (new_reg)
      && !call_really_used_regs[new_reg]
      && cfun_gpr_save_slot (new_reg) == 0)
    return false;

  return true;
}
/* Return nonzero if register REGNO can be used as a scratch register
   in peephole2.  */

static bool
s390_hard_regno_scratch_ok (unsigned int regno)
{
  /* See s390_hard_regno_rename_ok.  */
  if (GENERAL_REGNO_P (regno)
      && !call_really_used_regs[regno]
      && cfun_gpr_save_slot (regno) == 0)
    return false;

  return true;
}
/* Maximum number of registers to represent a value of mode MODE
   in a register of class RCLASS.  */

int
s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
{
  switch (rclass)
    {
    case FP_REGS:
      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
        return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
      else
        return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
    case ACCESS_REGS:
      return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
    default:
      break;
    }
  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
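
/* Worked example for the FP_REGS case above: a 16-byte TFmode value
   needs (16 + 7) / 8 == 2 FPRs, while a 32-byte TCmode (complex
   TFmode) value takes the MODE_COMPLEX_FLOAT branch and yields
   2 * ((32 / 2 + 7) / 8) == 4 FPRs, i.e. each 16-byte part gets its
   own FPR pair.  */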
/* Return true if we use LRA instead of reload pass.  */
static bool
s390_lra_p (void)
{
  return s390_lra_flag;
}
/* Return true if register FROM can be eliminated via register TO.  */

static bool
s390_can_eliminate (const int from, const int to)
{
  /* On zSeries machines, we have not marked the base register as fixed.
     Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
     If a function requires the base register, we say here that this
     elimination cannot be performed.  This will cause reload to free
     up the base register (as if it were fixed).  On the other hand,
     if the current function does *not* require the base register, we
     say here the elimination succeeds, which in turn allows reload
     to allocate the base register for any other purpose.  */
  if (from == BASE_REGNUM && to == BASE_REGNUM)
    {
      if (TARGET_CPU_ZARCH)
        {
          s390_init_frame_layout ();
          return cfun->machine->base_reg == NULL_RTX;
        }

      return false;
    }

  /* Everything else must point into the stack frame.  */
  gcc_assert (to == STACK_POINTER_REGNUM
              || to == HARD_FRAME_POINTER_REGNUM);

  gcc_assert (from == FRAME_POINTER_REGNUM
              || from == ARG_POINTER_REGNUM
              || from == RETURN_ADDRESS_POINTER_REGNUM);

  /* Make sure we actually saved the return address.  */
  if (from == RETURN_ADDRESS_POINTER_REGNUM)
    if (!crtl->calls_eh_return
        && !cfun->stdarg
        && !cfun_frame_layout.save_return_addr_p)
      return false;

  return true;
}
/* Return offset between register FROM and TO initially after prolog.  */

HOST_WIDE_INT
s390_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  /* ??? Why are we called for non-eliminable pairs?  */
  if (!s390_can_eliminate (from, to))
    return 0;

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      offset = (get_frame_size()
                + STACK_POINTER_OFFSET
                + crtl->outgoing_args_size);
      break;

    case ARG_POINTER_REGNUM:
      s390_init_frame_layout ();
      offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
      break;

    case RETURN_ADDRESS_POINTER_REGNUM:
      s390_init_frame_layout ();

      if (cfun_frame_layout.first_save_gpr_slot == -1)
        {
          /* If it turns out that for stdarg nothing went into the reg
             save area we also do not need the return address
             slot.  */
          if (cfun->stdarg && !cfun_save_arg_fprs_p)
            return 0;

          gcc_unreachable ();
        }

      /* In order to make the following work it is not necessary for
         r14 to have a save slot.  It is sufficient if one other GPR
         got one.  Since the GPRs are always stored without gaps we
         are able to calculate where the r14 save slot would
         reside.  */
      offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
                (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
                UNITS_PER_LONG);
      break;

    default:
      gcc_unreachable ();
    }

  return offset;
}
/* Emit insn to save fpr REGNUM at offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
save_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));

  if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
    set_mem_alias_set (addr, get_varargs_alias_set ());
  else
    set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
}
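
/* FPR hard registers are numbered from 16 in this port, so the range
   check above singles out the floating-point argument registers; their
   save slots get the varargs alias set because va_arg may later read
   them back, while all other FPR save slots belong to the ordinary
   frame alias set.  */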
/* Emit insn to restore fpr REGNUM from offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
restore_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
  set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
}
/* Return true if REGNO is a global register, but not one
   of the special ones that need to be saved/restored in anyway.  */

static inline bool
global_not_special_regno_p (int regno)
{
  return (global_regs[regno]
          /* These registers are special and need to be
             restored in any case.  */
          && !(regno == STACK_POINTER_REGNUM
               || regno == RETURN_REGNUM
               || regno == BASE_REGNUM
               || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
}
/* Generate insn to save registers FIRST to LAST into
   the register save area located at offset OFFSET
   relative to register BASE.  */

static rtx
save_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn, note;
  int i;

  addr = plus_constant (Pmode, base, offset);
  addr = gen_rtx_MEM (Pmode, addr);

  set_mem_alias_set (addr, get_frame_alias_set ());

  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
        insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
      else
        insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));

      if (!global_not_special_regno_p (first))
        RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  insn = gen_store_multiple (addr,
                             gen_rtx_REG (Pmode, first),
                             GEN_INT (last - first + 1));

  if (first <= 6 && cfun->stdarg)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
        rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);

        if (first + i <= 6)
          set_mem_alias_set (mem, get_varargs_alias_set ());
      }

  /* We need to set the FRAME_RELATED flag on all SETs
     inside the store-multiple pattern.

     However, we must not emit DWARF records for registers 2..5
     if they are stored for use by variable arguments ...

     ??? Unfortunately, it is not enough to simply not the
     FRAME_RELATED flags for those SETs, because the first SET
     of the PARALLEL is always treated as if it had the flag
     set, even if it does not.  Therefore we emit a new pattern
     without those registers as REG_FRAME_RELATED_EXPR note.  */

  if (first >= 6 && !global_not_special_regno_p (first))
    {
      rtx pat = PATTERN (insn);

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (GET_CODE (XVECEXP (pat, 0, i)) == SET
            && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
                                                                     0, i)))))
          RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;

      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (last >= 6)
    {
      int start;

      for (start = first >= 6 ? first : 6; start <= last; start++)
        if (!global_not_special_regno_p (start))
          break;

      if (start > last)
        return insn;

      addr = plus_constant (Pmode, base,
                            offset + (start - first) * UNITS_PER_LONG);

      if (start == last)
        {
          if (TARGET_64BIT)
            note = gen_movdi (gen_rtx_MEM (Pmode, addr),
                              gen_rtx_REG (Pmode, start));
          else
            note = gen_movsi (gen_rtx_MEM (Pmode, addr),
                              gen_rtx_REG (Pmode, start));
          note = PATTERN (note);

          add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
          RTX_FRAME_RELATED_P (insn) = 1;

          return insn;
        }

      note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
                                 gen_rtx_REG (Pmode, start),
                                 GEN_INT (last - start + 1));
      note = PATTERN (note);

      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);

      for (i = 0; i < XVECLEN (note, 0); i++)
        if (GET_CODE (XVECEXP (note, 0, i)) == SET
            && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
                                                                     0, i)))))
          RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;

      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return insn;
}
/* Generate insn to restore registers FIRST to LAST from
   the register save area located at offset OFFSET
   relative to register BASE.  */

static rtx
restore_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn;

  addr = plus_constant (Pmode, base, offset);
  addr = gen_rtx_MEM (Pmode, addr);
  set_mem_alias_set (addr, get_frame_alias_set ());

  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
        insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
      else
        insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);

      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
                            addr,
                            GEN_INT (last - first + 1));
  RTX_FRAME_RELATED_P (insn) = 1;
  return insn;
}
/* Return insn sequence to load the GOT register.  */

static GTY(()) rtx got_symbol;

rtx_insn *
s390_load_got (void)
{
  rtx_insn *insns;

  /* We cannot use pic_offset_table_rtx here since we use this
     function also for non-pic if __tls_get_offset is called and in
     that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
     aren't usable.  */
  rtx got_rtx = gen_rtx_REG (Pmode, 12);

  if (!got_symbol)
    {
      got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
    }

  start_sequence ();

  if (TARGET_CPU_ZARCH)
    {
      emit_move_insn (got_rtx, got_symbol);
    }
  else
    {
      rtx offset;

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
                               UNSPEC_LTREL_OFFSET);
      offset = gen_rtx_CONST (Pmode, offset);
      offset = force_const_mem (Pmode, offset);

      emit_move_insn (got_rtx, offset);

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
                               UNSPEC_LTREL_BASE);
      offset = gen_rtx_PLUS (Pmode, got_rtx, offset);

      emit_move_insn (got_rtx, offset);
    }

  insns = get_insns ();
  end_sequence ();
  return insns;
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
s390_emit_stack_tie (void)
{
  rtx mem = gen_frame_mem (BLKmode,
                           gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));

  emit_insn (gen_stack_tie (mem));
}
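
/* The tie is a BLKmode reference to the whole frame, so scheduling and
   alias analysis cannot move frame stores across the stack pointer
   update.  This matters in s390_emit_prologue below, where the high
   FPR save area is addressed through a temporary register rather than
   through the stack pointer.  */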
/* Copy GPRS into FPR save slots.  */

static void
s390_save_gprs_to_fprs (void)
{
  int i;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  for (i = 6; i < 16; i++)
    {
      if (FP_REGNO_P (cfun_gpr_save_slot (i)))
        {
          rtx_insn *insn =
            emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
                            gen_rtx_REG (DImode, i));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}
/* Restore GPRs from FPR save slots.  */

static void
s390_restore_gprs_from_fprs (void)
{
  int i;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  for (i = 6; i < 16; i++)
    {
      if (FP_REGNO_P (cfun_gpr_save_slot (i)))
        {
          rtx_insn *insn =
            emit_move_insn (gen_rtx_REG (DImode, i),
                            gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
          df_set_regs_ever_live (i, true);
          add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
          if (i == STACK_POINTER_REGNUM)
            add_reg_note (insn, REG_CFA_DEF_CFA,
                          plus_constant (Pmode, stack_pointer_rtx,
                                         STACK_POINTER_OFFSET));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}
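
/* On z10 and newer (hence the TARGET_Z10 guard in both helpers above)
   these DImode GPR<->FPR moves assemble to single ldgr/lgdr
   instructions, so a leaf function can park call-saved GPRs in
   otherwise unused FPRs without touching the stack at all.  */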
/* A pass run immediately before shrink-wrapping and prologue and epilogue
   generation.  */

namespace {

const pass_data pass_data_s390_early_mach =
{
  RTL_PASS, /* type */
  "early_mach", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
};

class pass_s390_early_mach : public rtl_opt_pass
{
public:
  pass_s390_early_mach (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_s390_early_mach

unsigned int
pass_s390_early_mach::execute (function *fun)
{
  rtx_insn *insn;

  /* Try to get rid of the FPR clobbers.  */
  s390_optimize_nonescaping_tx ();

  /* Re-compute register info.  */
  s390_register_info ();

  /* If we're using a base register, ensure that it is always valid for
     the first non-prologue instruction.  */
  if (fun->machine->base_reg)
    emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));

  /* Annotate all constant pool references to let the scheduler know
     they implicitly use the base register.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        annotate_constant_pool_refs (&PATTERN (insn));
        df_insn_rescan (insn);
      }
  return 0;
}

} // anon namespace
/* Expand the prologue into a bunch of separate insns.  */

void
s390_emit_prologue (void)
{
  rtx insn, addr;
  rtx temp_reg;
  int i;
  int offset;
  int next_fpr = 0;

  /* Choose best register to use for temp use within prologue.
     See below for why TPF must use the register 1.  */

  if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
      && !crtl->is_leaf
      && !TARGET_TPF_PROFILING)
    temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
  else
    temp_reg = gen_rtx_REG (Pmode, 1);

  s390_save_gprs_to_fprs ();

  /* Save call saved gprs.  */
  if (cfun_frame_layout.first_save_gpr != -1)
    {
      insn = save_gprs (stack_pointer_rtx,
                        cfun_frame_layout.gprs_offset +
                        UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
                                          - cfun_frame_layout.first_save_gpr_slot),
                        cfun_frame_layout.first_save_gpr,
                        cfun_frame_layout.last_save_gpr);
      emit_insn (insn);
    }

  /* Dummy insn to mark literal pool slot.  */

  if (cfun->machine->base_reg)
    emit_insn (gen_main_pool (cfun->machine->base_reg));

  offset = cfun_frame_layout.f0_offset;

  /* Save f0 and f2.  */
  for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
    {
      if (cfun_fpr_save_p (i))
        {
          save_fpr (stack_pointer_rtx, offset, i);
          offset += 8;
        }
      else if (!TARGET_PACKED_STACK || cfun->stdarg)
        offset += 8;
    }

  /* Save f4 and f6.  */
  offset = cfun_frame_layout.f4_offset;
  for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
    {
      if (cfun_fpr_save_p (i))
        {
          insn = save_fpr (stack_pointer_rtx, offset, i);
          offset += 8;

          /* If f4 and f6 are call clobbered they are saved due to
             stdargs and therefore are not frame related.  */
          if (!call_really_used_regs[i])
            RTX_FRAME_RELATED_P (insn) = 1;
        }
      else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
        offset += 8;
    }

  if (TARGET_PACKED_STACK
      && cfun_save_high_fprs_p
      && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
    {
      offset = (cfun_frame_layout.f8_offset
                + (cfun_frame_layout.high_fprs - 1) * 8);

      for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
        if (cfun_fpr_save_p (i))
          {
            insn = save_fpr (stack_pointer_rtx, offset, i);

            RTX_FRAME_RELATED_P (insn) = 1;
            offset -= 8;
          }
      if (offset >= cfun_frame_layout.f8_offset)
        next_fpr = i;
    }

  if (!TARGET_PACKED_STACK)
    next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;

  if (flag_stack_usage_info)
    current_function_static_stack_size = cfun_frame_layout.frame_size;
  /* Decrement stack pointer.  */

  if (cfun_frame_layout.frame_size > 0)
    {
      rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
      rtx real_frame_off;

      if (s390_stack_size)
        {
          HOST_WIDE_INT stack_guard;

          if (s390_stack_guard)
            stack_guard = s390_stack_guard;
          else
            {
              /* If no value for stack guard is provided the smallest power of 2
                 larger than the current frame size is chosen.  */
              stack_guard = 1;
              while (stack_guard < cfun_frame_layout.frame_size)
                stack_guard <<= 1;
            }

          if (cfun_frame_layout.frame_size >= s390_stack_size)
            {
              warning (0, "frame size of function %qs is %wd"
                       " bytes exceeding user provided stack limit of "
                       "%d bytes.  "
                       "An unconditional trap is added.",
                       current_function_name(), cfun_frame_layout.frame_size,
                       s390_stack_size);
              emit_insn (gen_trap ());
            }
          else
            {
              /* stack_guard has to be smaller than s390_stack_size.
                 Otherwise we would emit an AND with zero which would
                 not match the test under mask pattern.  */
              if (stack_guard >= s390_stack_size)
                {
                  warning (0, "frame size of function %qs is %wd"
                           " bytes which is more than half the stack size. "
                           "The dynamic check would not be reliable. "
                           "No check emitted for this function.",
                           current_function_name(),
                           cfun_frame_layout.frame_size);
                }
              else
                {
                  HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
                                                    & ~(stack_guard - 1));

                  rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
                                       GEN_INT (stack_check_mask));
                  if (TARGET_64BIT)
                    emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
                                                         t, const0_rtx),
                                             t, const0_rtx, const0_rtx));
                  else
                    emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
                                                         t, const0_rtx),
                                             t, const0_rtx, const0_rtx));
                }
            }
        }
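
      /* A worked example of the mask above (values chosen for
         illustration, not from the original sources): with
         -mstack-size=16384 and -mstack-guard=4096 the mask is
         (16384 - 1) & ~(4096 - 1) == 0x3000, i.e. the two address bits
         between guard size and stack size.  Both bits are zero exactly
         when the stack pointer has dropped into the lowest 4096-byte
         guard area of the 16384-byte stack region, and the conditional
         trap fires.  */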
      if (s390_warn_framesize > 0
          && cfun_frame_layout.frame_size >= s390_warn_framesize)
        warning (0, "frame size of %qs is %wd bytes",
                 current_function_name (), cfun_frame_layout.frame_size);

      if (s390_warn_dynamicstack_p && cfun->calls_alloca)
        warning (0, "%qs uses dynamic stack allocation", current_function_name ());

      /* Save incoming stack pointer into temp reg.  */
      if (TARGET_BACKCHAIN || next_fpr)
        insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));

      /* Subtract frame size from stack pointer.  */

      if (DISP_IN_RANGE (INTVAL (frame_off)))
        {
          insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                            frame_off));
          insn = emit_insn (insn);
        }
      else
        {
          if (!CONST_OK_FOR_K (INTVAL (frame_off)))
            frame_off = force_const_mem (Pmode, frame_off);

          insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
          annotate_constant_pool_refs (&PATTERN (insn));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
      real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                               real_frame_off)));

      /* Set backchain.  */

      if (TARGET_BACKCHAIN)
        {
          if (cfun_frame_layout.backchain_offset)
            addr = gen_rtx_MEM (Pmode,
                                plus_constant (Pmode, stack_pointer_rtx,
                                               cfun_frame_layout.backchain_offset));
          else
            addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
          set_mem_alias_set (addr, get_frame_alias_set ());
          insn = emit_insn (gen_move_insn (addr, temp_reg));
        }

      /* If we support non-call exceptions (e.g. for Java),
         we need to make sure the backchain pointer is set up
         before any possibly trapping memory access.  */
      if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
        {
          addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
          emit_clobber (addr);
        }
    }

  /* Save fprs 8 - 15 (64 bit ABI).  */

  if (cfun_save_high_fprs_p && next_fpr)
    {
      /* If the stack might be accessed through a different register
         we have to make sure that the stack pointer decrement is not
         moved below the use of the stack slots.  */
      s390_emit_stack_tie ();

      insn = emit_insn (gen_add2_insn (temp_reg,
                                       GEN_INT (cfun_frame_layout.f8_offset)));

      offset = 0;

      for (i = FPR8_REGNUM; i <= next_fpr; i++)
        if (cfun_fpr_save_p (i))
          {
            rtx addr = plus_constant (Pmode, stack_pointer_rtx,
                                      cfun_frame_layout.frame_size
                                      + cfun_frame_layout.f8_offset
                                      + offset);

            insn = save_fpr (temp_reg, offset, i);
            offset += 8;
            RTX_FRAME_RELATED_P (insn) = 1;
            add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                          gen_rtx_SET (VOIDmode,
                                       gen_rtx_MEM (DFmode, addr),
                                       gen_rtx_REG (DFmode, i)));
          }
    }

  /* Set frame pointer, if needed.  */

  if (frame_pointer_needed)
    {
      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Set up got pointer, if needed.  */

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    {
      rtx_insn *insns = s390_load_got ();

      for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
        annotate_constant_pool_refs (&PATTERN (insn));

      emit_insn (insns);
    }

  if (TARGET_TPF_PROFILING)
    {
      /* Generate a BAS instruction to serve as a function
         entry intercept to facilitate the use of tracing
         algorithms located at the branch target.  */
      emit_insn (gen_prologue_tpf ());

      /* Emit a blockage here so that all code
         lies between the profiling mechanisms.  */
      emit_insn (gen_blockage ());
    }
}
/* Expand the epilogue into a bunch of separate insns.  */

void
s390_emit_epilogue (bool sibcall)
{
  rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
  int area_bottom, area_top, offset = 0;
  int next_offset;
  rtvec p;
  int i;

  if (TARGET_TPF_PROFILING)
    {
      /* Generate a BAS instruction to serve as a function
         entry intercept to facilitate the use of tracing
         algorithms located at the branch target.  */

      /* Emit a blockage here so that all code
         lies between the profiling mechanisms.  */
      emit_insn (gen_blockage ());

      emit_insn (gen_epilogue_tpf ());
    }

  /* Check whether to use frame or stack pointer for restore.  */

  frame_pointer = (frame_pointer_needed
                   ? hard_frame_pointer_rtx : stack_pointer_rtx);

  s390_frame_area (&area_bottom, &area_top);

  /* Check whether we can access the register save area.
     If not, increment the frame pointer as required.  */

  if (area_top <= area_bottom)
    {
      /* Nothing to restore.  */
    }
  else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
           && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
    {
      /* Area is in range.  */
      offset = cfun_frame_layout.frame_size;
    }
  else
    {
      rtx insn, frame_off, cfa;

      offset = area_bottom < 0 ? -area_bottom : 0;
      frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);

      cfa = gen_rtx_SET (VOIDmode, frame_pointer,
                         gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
      if (DISP_IN_RANGE (INTVAL (frame_off)))
        {
          insn = gen_rtx_SET (VOIDmode, frame_pointer,
                              gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
          insn = emit_insn (insn);
        }
      else
        {
          if (!CONST_OK_FOR_K (INTVAL (frame_off)))
            frame_off = force_const_mem (Pmode, frame_off);

          insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
          annotate_constant_pool_refs (&PATTERN (insn));
        }
      add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Restore call saved fprs.  */

  if (TARGET_64BIT)
    {
      if (cfun_save_high_fprs_p)
        {
          next_offset = cfun_frame_layout.f8_offset;
          for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
            {
              if (cfun_fpr_save_p (i))
                {
                  restore_fpr (frame_pointer,
                               offset + next_offset, i);
                  cfa_restores
                    = alloc_reg_note (REG_CFA_RESTORE,
                                      gen_rtx_REG (DFmode, i), cfa_restores);
                  next_offset += 8;
                }
            }
        }
    }
  else
    {
      next_offset = cfun_frame_layout.f4_offset;
      /* f4, f6 */
      for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
        {
          if (cfun_fpr_save_p (i))
            {
              restore_fpr (frame_pointer,
                           offset + next_offset, i);
              cfa_restores
                = alloc_reg_note (REG_CFA_RESTORE,
                                  gen_rtx_REG (DFmode, i), cfa_restores);
              next_offset += 8;
            }
          else if (!TARGET_PACKED_STACK)
            next_offset += 8;
        }
    }

  /* Return register.  */

  return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);

  /* Restore call saved gprs.  */

  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      rtx insn, addr;
      int i;

      /* Check for global register and save them
         to stack location from where they get restored.  */

      for (i = cfun_frame_layout.first_restore_gpr;
           i <= cfun_frame_layout.last_restore_gpr;
           i++)
        {
          if (global_not_special_regno_p (i))
            {
              addr = plus_constant (Pmode, frame_pointer,
                                    offset + cfun_frame_layout.gprs_offset
                                    + (i - cfun_frame_layout.first_save_gpr_slot)
                                    * UNITS_PER_LONG);
              addr = gen_rtx_MEM (Pmode, addr);
              set_mem_alias_set (addr, get_frame_alias_set ());
              emit_move_insn (addr, gen_rtx_REG (Pmode, i));
            }
          else
            cfa_restores
              = alloc_reg_note (REG_CFA_RESTORE,
                                gen_rtx_REG (Pmode, i), cfa_restores);
        }

      if (! sibcall)
        {
          /* Fetch return address from stack before load multiple,
             this will do good for scheduling.

             Only do this if we already decided that r14 needs to be
             saved to a stack slot. (And not just because r14 happens to
             be in between two GPRs which need saving.)  Otherwise it
             would be difficult to take that decision back in
             s390_optimize_prologue.  */
          if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
            {
              int return_regnum = find_unused_clobbered_reg();
              if (!return_regnum)
                return_regnum = 4;
              return_reg = gen_rtx_REG (Pmode, return_regnum);

              addr = plus_constant (Pmode, frame_pointer,
                                    offset + cfun_frame_layout.gprs_offset
                                    + (RETURN_REGNUM
                                       - cfun_frame_layout.first_save_gpr_slot)
                                    * UNITS_PER_LONG);
              addr = gen_rtx_MEM (Pmode, addr);
              set_mem_alias_set (addr, get_frame_alias_set ());
              emit_move_insn (return_reg, addr);

              /* Once we did that optimization we have to make sure
                 s390_optimize_prologue does not try to remove the
                 store of r14 since we will not be able to find the
                 load issued here.  */
              cfun_frame_layout.save_return_addr_p = true;
            }
        }

      insn = restore_gprs (frame_pointer,
                           offset + cfun_frame_layout.gprs_offset
                           + (cfun_frame_layout.first_restore_gpr
                              - cfun_frame_layout.first_save_gpr_slot)
                           * UNITS_PER_LONG,
                           cfun_frame_layout.first_restore_gpr,
                           cfun_frame_layout.last_restore_gpr);
      insn = emit_insn (insn);
      REG_NOTES (insn) = cfa_restores;
      add_reg_note (insn, REG_CFA_DEF_CFA,
                    plus_constant (Pmode, stack_pointer_rtx,
                                   STACK_POINTER_OFFSET));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  s390_restore_gprs_from_fprs ();

  if (! sibcall)
    {
      /* Return to caller.  */

      p = rtvec_alloc (2);

      RTVEC_ELT (p, 0) = ret_rtx;
      RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
}
/* Implement TARGET_SET_UP_BY_PROLOGUE.  */

static void
s300_set_up_by_prologue (hard_reg_set_container *regs)
{
  if (cfun->machine->base_reg
      && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
    SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
}
/* Return true if the function can use simple_return to return outside
   of a shrink-wrapped region.  At present shrink-wrapping is supported
   in all cases.  */

bool
s390_can_use_simple_return_insn (void)
{
  return true;
}

/* Return true if the epilogue is guaranteed to contain only a return
   instruction and if a direct return can therefore be used instead.
   One of the main advantages of using direct return instructions
   is that we can then use conditional returns.  */

bool
s390_can_use_return_insn (void)
{
  int i;

  if (!reload_completed)
    return false;

  if (crtl->profile)
    return false;

  if (TARGET_TPF_PROFILING)
    return false;

  for (i = 0; i < 16; i++)
    if (cfun_gpr_save_slot (i))
      return false;

  /* For 31 bit this is not covered by the frame_size check below
     since f4, f6 are saved in the register save area without needing
     additional stack space.  */
  if (!TARGET_64BIT
      && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
    return false;

  if (cfun->machine->base_reg
      && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
    return false;

  return cfun_frame_layout.frame_size == 0;
}
/* Return the size in bytes of a function argument of
   type TYPE and/or mode MODE.  At least one of TYPE or
   MODE must be specified.  */

static int
s390_function_arg_size (machine_mode mode, const_tree type)
{
  if (type)
    return int_size_in_bytes (type);

  /* No type info available for some library calls ...  */
  if (mode != BLKmode)
    return GET_MODE_SIZE (mode);

  /* If we have neither type nor mode, abort */
  gcc_unreachable ();
}
/* Return true if a function argument of type TYPE and mode MODE
   is to be passed in a floating-point register, if available.  */

static bool
s390_function_arg_float (machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* Soft-float changes the ABI: no floating-point registers are used.  */
  if (TARGET_SOFT_FLOAT)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;

  /* The ABI says that record types with a single member are treated
     just like that member would be.  */
  while (TREE_CODE (type) == RECORD_TYPE)
    {
      tree field, single = NULL_TREE;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        {
          if (TREE_CODE (field) != FIELD_DECL)
            continue;

          if (single == NULL_TREE)
            single = TREE_TYPE (field);
          else
            return false;
        }

      if (single == NULL_TREE)
        return false;
      else
        type = single;
    }

  return TREE_CODE (type) == REAL_TYPE;
}
/* Return true if a function argument of type TYPE and mode MODE
   is to be passed in an integer register, or a pair of integer
   registers, if available.  */

static bool
s390_function_arg_integer (machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return GET_MODE_CLASS (mode) == MODE_INT
	   || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));

  /* We accept small integral (and similar) types.  */
  if (INTEGRAL_TYPE_P (type)
      || POINTER_TYPE_P (type)
      || TREE_CODE (type) == NULLPTR_TYPE
      || TREE_CODE (type) == OFFSET_TYPE
      || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
    return true;

  /* We also accept structs of size 1, 2, 4, 8 that are not
     passed in floating-point registers.  */
  if (AGGREGATE_TYPE_P (type)
      && exact_log2 (size) >= 0
      && !s390_function_arg_float (mode, type))
    return true;

  return false;
}
/* Return 1 if a function argument of type TYPE and mode MODE
   is to be passed by reference.  The ABI specifies that only
   structures of size 1, 2, 4, or 8 bytes are passed by value,
   all other structures (and complex numbers) are passed by
   reference.  */

static bool
s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
			machine_mode mode, const_tree type,
			bool named ATTRIBUTE_UNUSED)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return true;

  if (type)
    {
      if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
	return 1;

      if (TREE_CODE (type) == COMPLEX_TYPE
	  || TREE_CODE (type) == VECTOR_TYPE)
	return 1;
    }

  return 0;
}
/* Update the data in CUM to advance over an argument of mode MODE and
   data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.).  The boolean NAMED specifies whether the
   argument is a named argument (as opposed to an unnamed argument
   matching an ellipsis).  */

static void
s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (s390_function_arg_float (mode, type))
    cum->fprs += 1;
  else if (s390_function_arg_integer (mode, type))
    {
      int size = s390_function_arg_size (mode, type);
      cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
    }
  else
    gcc_unreachable ();
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On S/390, we use general purpose registers 2 through 6 to
   pass integer, pointer, and certain structure arguments, and
   floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
   to pass floating point arguments.  All remaining arguments
   are pushed to the stack.  */

static rtx
s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
		   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (s390_function_arg_float (mode, type))
    {
      if (cum->fprs + 1 > FP_ARG_NUM_REG)
	return 0;
      else
	return gen_rtx_REG (mode, cum->fprs + 16);
    }
  else if (s390_function_arg_integer (mode, type))
    {
      int size = s390_function_arg_size (mode, type);
      int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;

      if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
	return 0;
      else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
	return gen_rtx_REG (mode, cum->gprs + 2);
      else if (n_gprs == 2)
	{
	  rtvec p = rtvec_alloc (2);

	  RTVEC_ELT (p, 0)
	    = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
				 const0_rtx);
	  RTVEC_ELT (p, 1)
	    = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
				 GEN_INT (4));

	  return gen_rtx_PARALLEL (mode, p);
	}
    }

  /* After the real arguments, expand_call calls us once again
     with a void_type_node type.  Whatever we return here is
     passed as operand 2 to the call expanders.

     We don't need this feature ...  */
  else if (type == void_type_node)
    return const0_rtx;

  gcc_unreachable ();
}
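
/* Worked example (illustrative only): for a declaration

     void f (int a, double b, long c);

   the logic above places A in %r2, B in %f0 (the first FP argument
   register, hard reg 16), and C in %r3.  Under -m31 -mzarch, where
   UNITS_PER_WORD != UNITS_PER_LONG, an 8-byte integer argument that
   still fits in the argument registers is described by the PARALLEL
   built above, i.e. as an even/odd GPR pair.  */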
/* Return true if return values of type TYPE should be returned
   in a memory buffer whose address is passed by the caller as
   hidden first argument.  */

static bool
s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
{
  /* We accept small integral (and similar) types.  */
  if (INTEGRAL_TYPE_P (type)
      || POINTER_TYPE_P (type)
      || TREE_CODE (type) == OFFSET_TYPE
      || TREE_CODE (type) == REAL_TYPE)
    return int_size_in_bytes (type) > 8;

  /* Aggregates and similar constructs are always returned
     in memory.  */
  if (AGGREGATE_TYPE_P (type)
      || TREE_CODE (type) == COMPLEX_TYPE
      || TREE_CODE (type) == VECTOR_TYPE)
    return true;

  /* ??? We get called on all sorts of random stuff from
     aggregate_value_p.  We can't abort, but it's not clear
     what's safe to return.  Pretend it's a struct I guess.  */
  return true;
}
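
/* For illustration: even a small 'struct { long x; }' is returned via
   the hidden buffer because it is an aggregate, whereas a plain 'long'
   or a pointer comes back in a register; only integral values wider
   than 8 bytes fall back to memory.  */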
/* Function arguments and return values are promoted to word size.  */

static machine_mode
s390_promote_function_mode (const_tree type, machine_mode mode,
			    int *punsignedp,
			    const_tree fntype ATTRIBUTE_UNUSED,
			    int for_return ATTRIBUTE_UNUSED)
{
  if (INTEGRAL_MODE_P (mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
    {
      if (type != NULL_TREE && POINTER_TYPE_P (type))
	*punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  return mode;
}
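
/* Example (illustrative): with -m64 an 'int' argument arrives here in
   SImode, which is smaller than UNITS_PER_LONG, so it is widened to
   Pmode (DImode); for pointer-typed values the extension is forced to
   be unsigned via POINTERS_EXTEND_UNSIGNED.  */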
/* Define where to return a (scalar) value of type RET_TYPE.
   If RET_TYPE is null, define where to return a (scalar)
   value of mode MODE from a libcall.  */

static rtx
s390_function_and_libcall_value (machine_mode mode,
				 const_tree ret_type,
				 const_tree fntype_or_decl,
				 bool outgoing ATTRIBUTE_UNUSED)
{
  /* For normal functions perform the promotion as
     promote_function_mode would do.  */
  if (ret_type)
    {
      int unsignedp = TYPE_UNSIGNED (ret_type);
      mode = promote_function_mode (ret_type, mode, &unsignedp,
				    fntype_or_decl, 1);
    }

  gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE_SIZE (mode) <= 8);

  if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
    return gen_rtx_REG (mode, 16);
  else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
	   || UNITS_PER_LONG == UNITS_PER_WORD)
    return gen_rtx_REG (mode, 2);
  else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
    {
      /* This case is triggered when returning a 64 bit value with
	 -m31 -mzarch.  Although the value would fit into a single
	 register it has to be forced into a 32 bit register pair in
	 order to match the ABI.  */
      rtvec p = rtvec_alloc (2);

      RTVEC_ELT (p, 0)
	= gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
      RTVEC_ELT (p, 1)
	= gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));

      return gen_rtx_PARALLEL (mode, p);
    }

  gcc_unreachable ();
}
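
/* Illustration: 'long long f (void)' built with -m31 -mzarch yields
   GET_MODE_SIZE (DImode) == 2 * UNITS_PER_LONG, so the PARALLEL above
   forces the value into the %r2/%r3 pair as the ABI requires, even
   though a single 64-bit register could hold it.  */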
/* Define where to return a scalar return value of type RET_TYPE.  */

static rtx
s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
		     bool outgoing)
{
  return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
					  fn_decl_or_type, outgoing);
}

/* Define where to return a scalar libcall return value of mode
   MODE.  */

static rtx
s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return s390_function_and_libcall_value (mode, NULL_TREE,
					  NULL_TREE, true);
}
/* Create and return the va_list datatype.

   On S/390, va_list is an array type equivalent to

      typedef struct __va_list_tag
        {
            long __gpr;
            long __fpr;
            void *__overflow_arg_area;
            void *__reg_save_area;
        } va_list [1];

   where __gpr and __fpr hold the number of general purpose
   or floating point arguments used up to now, respectively,
   __overflow_arg_area points to the stack location of the
   next argument passed on the stack, and __reg_save_area
   always points to the start of the register area in the
   call frame of the current function.  The function prologue
   saves all registers used for argument passing into this
   area if the function uses variable arguments.  */

static tree
s390_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;

  record = lang_hooks.types.make_type (RECORD_TYPE);

  type_decl =
    build_decl (BUILTINS_LOCATION,
		TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__gpr"),
		      long_integer_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__fpr"),
		      long_integer_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
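
/* Sketch of the corresponding source-level usage (illustrative, not
   part of the back end):

     void sum_ints (int n, ...)
     {
       va_list ap;                 -- array of one __va_list_tag
       va_start (ap, n);           -- s390_va_start fills the four fields
       while (n--)
	 (void) va_arg (ap, int);  -- lowered by s390_gimplify_va_arg
       va_end (ap);
     }
*/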
/* Implement va_start by filling the va_list structure VALIST.
   STDARG_P is always true, and ignored.
   NEXTARG points to the first anonymous stack argument.

   The following global variables are used to initialize
   the va_list structure:

     crtl->args.info:
       holds number of gprs and fprs used for named arguments.
     crtl->args.arg_offset_rtx:
       holds the offset of the first anonymous stack argument
       (relative to the virtual arg pointer).  */

static void
s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT n_gpr, n_fpr;
  int off;
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */

  n_gpr = crtl->args.info.gprs;
  n_fpr = crtl->args.info.fprs;

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
      || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
    {
      t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);

      off = INTVAL (crtl->args.arg_offset_rtx);
      off = off < 0 ? 0 : off;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
		 (int)n_gpr, (int)n_fpr, off);

      t = fold_build_pointer_plus_hwi (t, off);

      t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the register save area.  */
  if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
      || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
    {
      t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
      t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);

      t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
/* Implement va_arg by updating the va_list structure
   VALIST as required to retrieve an argument of type
   TYPE, and returning that argument.

   Generates code equivalent to:

   if (integral value) {
     if (size  <= 4 && args.gpr < 5 ||
	 size  > 4 && args.gpr < 4 )
       ret = args.reg_save_area[args.gpr+8]
     else
       ret = *args.overflow_arg_area++;
   } else if (float value) {
     if (args.fpr < 2)
       ret = args.reg_save_area[args.fpr+64]
     else
       ret = *args.overflow_arg_area++;
   } else if (aggregate value) {
     if (args.gpr < 5)
       ret = *args.reg_save_area[args.gpr]
     else
       ret = **args.overflow_arg_area++;
   } */

static tree
s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
		      gimple_seq *post_p ATTRIBUTE_UNUSED)
{
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
  tree lab_false, lab_over, addr;

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  /* The tree for args* cannot be shared between gpr/fpr and ovf since
     both appear on a lhs.  */
  valist = unshare_expr (valist);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);

  size = int_size_in_bytes (type);

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "va_arg: aggregate type");
	  debug_tree (type);
	}

      /* Aggregates are passed by reference.  */
      indirect_p = 1;
      reg = gpr;
      n_reg = 1;

      /* kernel stack layout on 31 bit: It is assumed here that no padding
	 will be added by s390_frame_info because for va_args always an even
	 number of gprs has to be saved r15-r2 = 14 regs.  */
      sav_ofs = 2 * UNITS_PER_LONG;
      sav_scale = UNITS_PER_LONG;
      size = UNITS_PER_LONG;
      max_reg = GP_ARG_NUM_REG - n_reg;
    }
  else if (s390_function_arg_float (TYPE_MODE (type), type))
    {
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "va_arg: float type");
	  debug_tree (type);
	}

      /* FP args go in FP registers, if present.  */
      indirect_p = 0;
      reg = fpr;
      n_reg = 1;
      sav_ofs = 16 * UNITS_PER_LONG;
      sav_scale = 8;
      max_reg = FP_ARG_NUM_REG - n_reg;
    }
  else
    {
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "va_arg: other type");
	  debug_tree (type);
	}

      /* Otherwise into GP registers.  */
      indirect_p = 0;
      reg = gpr;
      n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;

      /* kernel stack layout on 31 bit: It is assumed here that no padding
	 will be added by s390_frame_info because for va_args always an even
	 number of gprs has to be saved r15-r2 = 14 regs.  */
      sav_ofs = 2 * UNITS_PER_LONG;

      if (size < UNITS_PER_LONG)
	sav_ofs += UNITS_PER_LONG - size;

      sav_scale = UNITS_PER_LONG;
      max_reg = GP_ARG_NUM_REG - n_reg;
    }

  /* Pull the value out of the saved registers ...  */

  lab_false = create_artificial_label (UNKNOWN_LOCATION);
  lab_over = create_artificial_label (UNKNOWN_LOCATION);
  addr = create_tmp_var (ptr_type_node, "addr");

  t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
  t = build2 (GT_EXPR, boolean_type_node, reg, t);
  u = build1 (GOTO_EXPR, void_type_node, lab_false);
  t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
  gimplify_and_add (t, pre_p);

  t = fold_build_pointer_plus_hwi (sav, sav_ofs);
  u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
	      fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
  t = fold_build_pointer_plus (t, u);

  gimplify_assign (addr, t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

  gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));

  /* ... Otherwise out of the overflow area.  */

  t = ovf;
  if (size < UNITS_PER_LONG)
    t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (addr, t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (ovf, t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));

  /* Increment register save count.  */

  u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
	      fold_convert (TREE_TYPE (reg), size_int (n_reg)));
  gimplify_and_add (u, pre_p);

  if (indirect_p)
    {
      t = build_pointer_type_for_mode (build_pointer_type (type),
				       ptr_mode, true);
      addr = fold_convert (t, addr);
      addr = build_va_arg_indirect_ref (addr);
    }
  else
    {
      t = build_pointer_type_for_mode (type, ptr_mode, true);
      addr = fold_convert (t, addr);
    }

  return build_va_arg_indirect_ref (addr);
}
/* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
   expanders.
   DEST  - Register location where CC will be stored.
   TDB   - Pointer to a 256 byte area where to store the transaction
	   diagnostic block.  NULL if TDB is not needed.
   RETRY - Retry count value.  If non-NULL a retry loop for CC2
	   is emitted.
   CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
	   of the tbegin instruction pattern.  */

void
s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
{
  rtx retry_plus_two = gen_reg_rtx (SImode);
  rtx retry_reg = gen_reg_rtx (SImode);
  rtx_code_label *retry_label = NULL;

  if (retry != NULL_RTX)
    {
      emit_move_insn (retry_reg, retry);
      emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
      emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
      retry_label = gen_label_rtx ();
      emit_label (retry_label);
    }

  if (clobber_fprs_p)
    emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK), tdb));
  else
    emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
				     tdb));

  emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
					gen_rtvec (1, gen_rtx_REG (CCRAWmode,
								   CC_REGNUM)),
					UNSPEC_CC_TO_INT));
  if (retry != NULL_RTX)
    {
      const int CC0 = 1 << 3;
      const int CC1 = 1 << 2;
      const int CC3 = 1 << 0;
      rtx jump;
      rtx count = gen_reg_rtx (SImode);
      rtx_code_label *leave_label = gen_label_rtx ();

      /* Exit for success and permanent failures.  */
      jump = s390_emit_jump (leave_label,
			     gen_rtx_EQ (VOIDmode,
			       gen_rtx_REG (CCRAWmode, CC_REGNUM),
			       gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
      LABEL_NUSES (leave_label) = 1;

      /* CC2 - transient failure.  Perform retry with ppa.  */
      emit_move_insn (count, retry_plus_two);
      emit_insn (gen_subsi3 (count, count, retry_reg));
      emit_insn (gen_tx_assist (count));
      jump = emit_jump_insn (gen_doloop_si64 (retry_label,
					      retry_reg,
					      retry_reg));
      JUMP_LABEL (jump) = retry_label;
      LABEL_NUSES (retry_label) = 1;
      emit_label (leave_label);
    }
}
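
/* Rough behavioral sketch of the emitted retry sequence for a retry
   count N (an assumed reading of the RTL above, not literal output):

     retry_reg = N + 1;
   retry:
     cc = tbegin;
     if (cc != 2)                -- CC0/CC1/CC3: success or permanent
       goto leave;               -- failure, leave the loop
     ppa (N + 2 - retry_reg);    -- assist with current attempt number
     if (--retry_reg != 0)       -- counted loop (brct-style)
       goto retry;
   leave:
     ;
*/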
enum s390_builtin
{
  S390_BUILTIN_TBEGIN,
  S390_BUILTIN_TBEGIN_NOFLOAT,
  S390_BUILTIN_TBEGIN_RETRY,
  S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
  S390_BUILTIN_TBEGINC,
  S390_BUILTIN_TEND,
  S390_BUILTIN_TABORT,
  S390_BUILTIN_NON_TX_STORE,
  S390_BUILTIN_TX_NESTING_DEPTH,
  S390_BUILTIN_TX_ASSIST,

  S390_BUILTIN_max
};

static enum insn_code const code_for_builtin[S390_BUILTIN_max] = {
  CODE_FOR_tbegin,
  CODE_FOR_tbegin_nofloat,
  CODE_FOR_tbegin_retry,
  CODE_FOR_tbegin_retry_nofloat,
  CODE_FOR_tbeginc,
  CODE_FOR_tend,
  CODE_FOR_tabort,
  CODE_FOR_ntstg,
  CODE_FOR_etnd,
  CODE_FOR_tx_assist
};
static void
s390_init_builtins (void)
{
  tree ftype, uint64_type;
  tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
				       NULL, NULL);
  tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);

  /* void foo (void) */
  ftype = build_function_type_list (void_type_node, NULL_TREE);
  add_builtin_function ("__builtin_tbeginc", ftype, S390_BUILTIN_TBEGINC,
			BUILT_IN_MD, NULL, NULL_TREE);

  /* void foo (int) */
  ftype = build_function_type_list (void_type_node, integer_type_node,
				    NULL_TREE);
  add_builtin_function ("__builtin_tabort", ftype,
			S390_BUILTIN_TABORT, BUILT_IN_MD, NULL, noreturn_attr);
  add_builtin_function ("__builtin_tx_assist", ftype,
			S390_BUILTIN_TX_ASSIST, BUILT_IN_MD, NULL, NULL_TREE);

  /* int foo (void *) */
  ftype = build_function_type_list (integer_type_node, ptr_type_node, NULL_TREE);
  add_builtin_function ("__builtin_tbegin", ftype, S390_BUILTIN_TBEGIN,
			BUILT_IN_MD, NULL, returns_twice_attr);
  add_builtin_function ("__builtin_tbegin_nofloat", ftype,
			S390_BUILTIN_TBEGIN_NOFLOAT,
			BUILT_IN_MD, NULL, returns_twice_attr);

  /* int foo (void *, int) */
  ftype = build_function_type_list (integer_type_node, ptr_type_node,
				    integer_type_node, NULL_TREE);
  add_builtin_function ("__builtin_tbegin_retry", ftype,
			S390_BUILTIN_TBEGIN_RETRY,
			BUILT_IN_MD,
			NULL, returns_twice_attr);
  add_builtin_function ("__builtin_tbegin_retry_nofloat", ftype,
			S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
			BUILT_IN_MD,
			NULL, returns_twice_attr);

  /* int foo (void) */
  ftype = build_function_type_list (integer_type_node, NULL_TREE);
  add_builtin_function ("__builtin_tx_nesting_depth", ftype,
			S390_BUILTIN_TX_NESTING_DEPTH,
			BUILT_IN_MD, NULL, NULL_TREE);
  add_builtin_function ("__builtin_tend", ftype,
			S390_BUILTIN_TEND, BUILT_IN_MD, NULL, NULL_TREE);

  /* void foo (uint64_t *, uint64_t) */
  if (TARGET_64BIT)
    uint64_type = long_unsigned_type_node;
  else
    uint64_type = long_long_unsigned_type_node;

  ftype = build_function_type_list (void_type_node,
				    build_pointer_type (uint64_type),
				    uint64_type, NULL_TREE);
  add_builtin_function ("__builtin_non_tx_store", ftype,
			S390_BUILTIN_NON_TX_STORE,
			BUILT_IN_MD, NULL, NULL_TREE);
}
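
/* Example usage from user code (illustrative; these are the builtins
   registered above, and the null pointer stands for "no transaction
   diagnostic block"):

     int cc = __builtin_tbegin_retry ((void *) 0, 5);
     if (cc == 0)
       {
	 ...transactionally executed code...
	 __builtin_tend ();
       }
*/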
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;
  tree arg;
  call_expr_arg_iterator iter;

  if (fcode >= S390_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  if (!TARGET_HTM)
    error ("Transactional execution builtins not enabled (-mhtm)\n");

  /* Set a flag in the machine specific cfun part in order to support
     saving/restoring of FPRs.  */
  if (fcode == S390_BUILTIN_TBEGIN || fcode == S390_BUILTIN_TBEGIN_RETRY)
    cfun->machine->tbegin_p = true;

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
	return NULL_RTX;
      if (arity >= MAX_ARGS)
	return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
	{
	  if (insn_op->predicate == memory_operand)
	    {
	      /* Don't move a NULL pointer into a register.  Otherwise
		 we have to rely on combine being able to move it back
		 in order to get an immediate 0 in the instruction.  */
	      if (op[arity] != const0_rtx)
		op[arity] = copy_to_mode_reg (Pmode, op[arity]);
	      op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
	    }
	  else
	    op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
	}

      arity++;
    }

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0]);
      else
	pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1]);
      else
	pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
/* We call mcount before the function prologue.  So a profiled leaf
   function should stay a leaf function.  */

static bool
s390_keep_leaf_when_profiled ()
{
  return true;
}
/* Output assembly code for the trampoline template to
   stdio stream FILE.

   On S/390, we use gpr 1 internally in the trampoline code;
   gpr 0 is used to hold the static chain.  */

static void
s390_asm_trampoline_template (FILE *file)
{
  rtx op[2];
  op[0] = gen_rtx_REG (Pmode, 0);
  op[1] = gen_rtx_REG (Pmode, 1);

  if (TARGET_64BIT)
    {
      output_asm_insn ("basr\t%1,0", op);         /* 2 byte */
      output_asm_insn ("lmg\t%0,%1,14(%1)", op);  /* 6 byte */
      output_asm_insn ("br\t%1", op);             /* 2 byte */
      ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
    }
  else
    {
      output_asm_insn ("basr\t%1,0", op);         /* 2 byte */
      output_asm_insn ("lm\t%0,%1,6(%1)", op);    /* 4 byte */
      output_asm_insn ("br\t%1", op);             /* 2 byte */
      ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
    }
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);

  mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
  emit_move_insn (mem, cxt);
  mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
  emit_move_insn (mem, fnaddr);
}
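
/* The initialized trampoline therefore looks like this in memory
   (offsets in multiples of UNITS_PER_LONG):

     0 .. 1:  code copied from the template above (basr/lm(g)/br)
     2:       static chain value CXT, loaded into %r0 by the template
     3:       target address FNADDR, loaded into %r1  */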
/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */

void
s390_function_profiler (FILE *file, int labelno)
{
  rtx op[7];
  char label[128];
  ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);

  fprintf (file, "# function profiler \n");

  op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
  op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));

  op[2] = gen_rtx_REG (Pmode, 1);
  op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
  SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;

  op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
  if (flag_pic)
    {
      op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
      op[4] = gen_rtx_CONST (Pmode, op[4]);
    }

  if (TARGET_64BIT)
    {
      output_asm_insn ("stg\t%0,%1", op);
      output_asm_insn ("larl\t%2,%3", op);
      output_asm_insn ("brasl\t%0,%4", op);
      output_asm_insn ("lg\t%0,%1", op);
    }
  else if (!flag_pic)
    {
      op[6] = gen_label_rtx ();

      output_asm_insn ("st\t%0,%1", op);
      output_asm_insn ("bras\t%2,%l6", op);
      output_asm_insn (".long\t%4", op);
      output_asm_insn (".long\t%3", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
      output_asm_insn ("l\t%0,0(%2)", op);
      output_asm_insn ("l\t%2,4(%2)", op);
      output_asm_insn ("basr\t%0,%0", op);
      output_asm_insn ("l\t%0,%1", op);
    }
  else
    {
      op[5] = gen_label_rtx ();
      op[6] = gen_label_rtx ();

      output_asm_insn ("st\t%0,%1", op);
      output_asm_insn ("bras\t%2,%l6", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
      output_asm_insn (".long\t%4-%l5", op);
      output_asm_insn (".long\t%3-%l5", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
      output_asm_insn ("lr\t%0,%2", op);
      output_asm_insn ("a\t%0,0(%2)", op);
      output_asm_insn ("a\t%2,4(%2)", op);
      output_asm_insn ("basr\t%0,%0", op);
      output_asm_insn ("l\t%0,%1", op);
    }
}
/* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
   into its SYMBOL_REF_FLAGS.  */

static void
s390_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == VAR_DECL)
    {
      /* If a variable has a forced alignment to < 2 bytes, mark it
	 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
	 operand.  */
      if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
	SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
      if (!DECL_SIZE (decl)
	  || !DECL_ALIGN (decl)
	  || !tree_fits_shwi_p (DECL_SIZE (decl))
	  || (DECL_ALIGN (decl) <= 64
	      && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl))))
	SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
    }

  /* Literal pool references don't have a decl so they are handled
     differently here.  We rely on the information in the MEM_ALIGN
     entry to decide upon natural alignment.  */
  if (MEM_P (rtl)
      && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
      && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
      && (MEM_ALIGN (rtl) == 0
	  || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
	  || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
}
/* Output thunk to FILE that implements a C++ virtual function call (with
   multiple inheritance) to FUNCTION.  The thunk adjusts the this pointer
   by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
   stored at VCALL_OFFSET in the vtable whose address is located at offset 0
   relative to the resulting this pointer.  */

static void
s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx op[10];
  int nonlocal = 0;

  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), file, 1);

  /* Operand 0 is the target function.  */
  op[0] = XEXP (DECL_RTL (function), 0);
  if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
    {
      nonlocal = 1;
      op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
			      TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
      op[0] = gen_rtx_CONST (Pmode, op[0]);
    }

  /* Operand 1 is the 'this' pointer.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    op[1] = gen_rtx_REG (Pmode, 3);
  else
    op[1] = gen_rtx_REG (Pmode, 2);

  /* Operand 2 is the delta.  */
  op[2] = GEN_INT (delta);

  /* Operand 3 is the vcall_offset.  */
  op[3] = GEN_INT (vcall_offset);

  /* Operand 4 is the temporary register.  */
  op[4] = gen_rtx_REG (Pmode, 1);

  /* Operands 5 to 8 can be used as labels.  */
  op[5] = NULL_RTX;
  op[6] = NULL_RTX;
  op[7] = NULL_RTX;
  op[8] = NULL_RTX;

  /* Operand 9 can be used for temporary register.  */
  op[9] = NULL_RTX;

  /* Generate code.  */
  if (TARGET_64BIT)
    {
      /* Setup literal pool pointer if required.  */
      if ((!DISP_IN_RANGE (delta)
	   && !CONST_OK_FOR_K (delta)
	   && !CONST_OK_FOR_Os (delta))
	  || (!DISP_IN_RANGE (vcall_offset)
	      && !CONST_OK_FOR_K (vcall_offset)
	      && !CONST_OK_FOR_Os (vcall_offset)))
	{
	  op[5] = gen_label_rtx ();
	  output_asm_insn ("larl\t%4,%5", op);
	}

      /* Add DELTA to this pointer.  */
      if (delta)
	{
	  if (CONST_OK_FOR_J (delta))
	    output_asm_insn ("la\t%1,%2(%1)", op);
	  else if (DISP_IN_RANGE (delta))
	    output_asm_insn ("lay\t%1,%2(%1)", op);
	  else if (CONST_OK_FOR_K (delta))
	    output_asm_insn ("aghi\t%1,%2", op);
	  else if (CONST_OK_FOR_Os (delta))
	    output_asm_insn ("agfi\t%1,%2", op);
	  else
	    {
	      op[6] = gen_label_rtx ();
	      output_asm_insn ("agf\t%1,%6-%5(%4)", op);
	    }
	}

      /* Perform vcall adjustment.  */
      if (vcall_offset)
	{
	  if (DISP_IN_RANGE (vcall_offset))
	    {
	      output_asm_insn ("lg\t%4,0(%1)", op);
	      output_asm_insn ("ag\t%1,%3(%4)", op);
	    }
	  else if (CONST_OK_FOR_K (vcall_offset))
	    {
	      output_asm_insn ("lghi\t%4,%3", op);
	      output_asm_insn ("ag\t%4,0(%1)", op);
	      output_asm_insn ("ag\t%1,0(%4)", op);
	    }
	  else if (CONST_OK_FOR_Os (vcall_offset))
	    {
	      output_asm_insn ("lgfi\t%4,%3", op);
	      output_asm_insn ("ag\t%4,0(%1)", op);
	      output_asm_insn ("ag\t%1,0(%4)", op);
	    }
	  else
	    {
	      op[7] = gen_label_rtx ();
	      output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
	      output_asm_insn ("ag\t%4,0(%1)", op);
	      output_asm_insn ("ag\t%1,0(%4)", op);
	    }
	}

      /* Jump to target.  */
      output_asm_insn ("jg\t%0", op);

      /* Output literal pool if required.  */
      if (op[5])
	{
	  output_asm_insn (".align\t4", op);
	  targetm.asm_out.internal_label (file, "L",
					  CODE_LABEL_NUMBER (op[5]));
	}
      if (op[6])
	{
	  targetm.asm_out.internal_label (file, "L",
					  CODE_LABEL_NUMBER (op[6]));
	  output_asm_insn (".long\t%2", op);
	}
      if (op[7])
	{
	  targetm.asm_out.internal_label (file, "L",
					  CODE_LABEL_NUMBER (op[7]));
	  output_asm_insn (".long\t%3", op);
	}
    }
  else
    {
      /* Setup base pointer if required.  */
      if (!vcall_offset
	  || (!DISP_IN_RANGE (delta)
	      && !CONST_OK_FOR_K (delta)
	      && !CONST_OK_FOR_Os (delta))
	  || (!DISP_IN_RANGE (delta)
	      && !CONST_OK_FOR_K (vcall_offset)
	      && !CONST_OK_FOR_Os (vcall_offset)))
	{
	  op[5] = gen_label_rtx ();
	  output_asm_insn ("basr\t%4,0", op);
	  targetm.asm_out.internal_label (file, "L",
					  CODE_LABEL_NUMBER (op[5]));
	}

      /* Add DELTA to this pointer.  */
      if (delta)
	{
	  if (CONST_OK_FOR_J (delta))
	    output_asm_insn ("la\t%1,%2(%1)", op);
	  else if (DISP_IN_RANGE (delta))
	    output_asm_insn ("lay\t%1,%2(%1)", op);
	  else if (CONST_OK_FOR_K (delta))
	    output_asm_insn ("ahi\t%1,%2", op);
	  else if (CONST_OK_FOR_Os (delta))
	    output_asm_insn ("afi\t%1,%2", op);
	  else
	    {
	      op[6] = gen_label_rtx ();
	      output_asm_insn ("a\t%1,%6-%5(%4)", op);
	    }
	}

      /* Perform vcall adjustment.  */
      if (vcall_offset)
	{
	  if (CONST_OK_FOR_J (vcall_offset))
	    {
	      output_asm_insn ("l\t%4,0(%1)", op);
	      output_asm_insn ("a\t%1,%3(%4)", op);
	    }
	  else if (DISP_IN_RANGE (vcall_offset))
	    {
	      output_asm_insn ("l\t%4,0(%1)", op);
	      output_asm_insn ("ay\t%1,%3(%4)", op);
	    }
	  else if (CONST_OK_FOR_K (vcall_offset))
	    {
	      output_asm_insn ("lhi\t%4,%3", op);
	      output_asm_insn ("a\t%4,0(%1)", op);
	      output_asm_insn ("a\t%1,0(%4)", op);
	    }
	  else if (CONST_OK_FOR_Os (vcall_offset))
	    {
	      output_asm_insn ("iilf\t%4,%3", op);
	      output_asm_insn ("a\t%4,0(%1)", op);
	      output_asm_insn ("a\t%1,0(%4)", op);
	    }
	  else
	    {
	      op[7] = gen_label_rtx ();
	      output_asm_insn ("l\t%4,%7-%5(%4)", op);
	      output_asm_insn ("a\t%4,0(%1)", op);
	      output_asm_insn ("a\t%1,0(%4)", op);
	    }

	  /* We had to clobber the base pointer register.
	     Re-setup the base pointer (with a different base).  */
	  op[5] = gen_label_rtx ();
	  output_asm_insn ("basr\t%4,0", op);
	  targetm.asm_out.internal_label (file, "L",
					  CODE_LABEL_NUMBER (op[5]));
	}

      /* Jump to target.  */
      op[8] = gen_label_rtx ();

      if (!flag_pic)
	output_asm_insn ("l\t%4,%8-%5(%4)", op);
      else if (!nonlocal)
	output_asm_insn ("a\t%4,%8-%5(%4)", op);
      /* We cannot call through .plt, since .plt requires %r12 loaded.  */
      else if (flag_pic == 1)
	{
	  output_asm_insn ("a\t%4,%8-%5(%4)", op);
	  output_asm_insn ("l\t%4,%0(%4)", op);
	}
      else if (flag_pic == 2)
	{
	  op[9] = gen_rtx_REG (Pmode, 0);
	  output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
	  output_asm_insn ("a\t%4,%8-%5(%4)", op);
	  output_asm_insn ("ar\t%4,%9", op);
	  output_asm_insn ("l\t%4,0(%4)", op);
	}

      output_asm_insn ("br\t%4", op);

      /* Output literal pool.  */
      output_asm_insn (".align\t4", op);

      if (nonlocal && flag_pic == 2)
	output_asm_insn (".long\t%0", op);
      if (nonlocal)
	{
	  op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
	  SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
	}

      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
      if (!flag_pic)
	output_asm_insn (".long\t%0", op);
      else
	output_asm_insn (".long\t%0-%5", op);

      if (op[6])
	{
	  targetm.asm_out.internal_label (file, "L",
					  CODE_LABEL_NUMBER (op[6]));
	  output_asm_insn (".long\t%2", op);
	}
      if (op[7])
	{
	  targetm.asm_out.internal_label (file, "L",
					  CODE_LABEL_NUMBER (op[7]));
	  output_asm_insn (".long\t%3", op);
	}
    }
  final_end_function ();
}
static bool
s390_valid_pointer_mode (machine_mode mode)
{
  return (mode == SImode || (TARGET_64BIT && mode == DImode));
}
/* Checks whether the given CALL_EXPR would use a caller
   saved register.  This is used to decide whether sibling call
   optimization could be performed on the respective function
   call.  */

static bool
s390_call_saved_register_used (tree call_expr)
{
  CUMULATIVE_ARGS cum_v;
  cumulative_args_t cum;
  tree parameter;
  machine_mode mode;
  tree type;
  rtx parm_rtx;
  int reg, i;

  INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
  cum = pack_cumulative_args (&cum_v);

  for (i = 0; i < call_expr_nargs (call_expr); i++)
    {
      parameter = CALL_EXPR_ARG (call_expr, i);
      gcc_assert (parameter);

      /* For an undeclared variable passed as parameter we will get
	 an ERROR_MARK node here.  */
      if (TREE_CODE (parameter) == ERROR_MARK)
	return true;

      type = TREE_TYPE (parameter);
      gcc_assert (type);

      mode = TYPE_MODE (type);
      gcc_assert (mode);

      if (pass_by_reference (&cum_v, mode, type, true))
	{
	  mode = Pmode;
	  type = build_pointer_type (type);
	}

      parm_rtx = s390_function_arg (cum, mode, type, 0);

      s390_function_arg_advance (cum, mode, type, 0);

      if (!parm_rtx)
	continue;

      if (REG_P (parm_rtx))
	{
	  for (reg = 0;
	       reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
	       reg++)
	    if (!call_used_regs[reg + REGNO (parm_rtx)])
	      return true;
	}

      if (GET_CODE (parm_rtx) == PARALLEL)
	{
	  int i;

	  for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
	    {
	      rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);

	      gcc_assert (REG_P (r));

	      for (reg = 0;
		   reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
		   reg++)
		if (!call_used_regs[reg + REGNO (r)])
		  return true;
	    }
	}
    }
  return false;
}
/* Return true if the given call expression can be
   turned into a sibling call.
   DECL holds the declaration of the function to be called whereas
   EXP is the call expression itself.  */

static bool
s390_function_ok_for_sibcall (tree decl, tree exp)
{
  /* The TPF epilogue uses register 1.  */
  if (TARGET_TPF_PROFILING)
    return false;

  /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
     which would have to be restored before the sibcall.  */
  if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
    return false;

  /* Register 6 on s390 is available as an argument register but unfortunately
     "caller saved".  This makes functions needing this register for arguments
     not suitable for sibcalls.  */
  return !s390_call_saved_register_used (exp);
}
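
/* For example (illustrative): a call 'f (a, b, c, d, e)' passing five
   integer or pointer arguments occupies %r2..%r6.  Since %r6 is
   call-saved, s390_call_saved_register_used returns true for it and
   the call is not turned into a sibling call.  */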
/* Return the fixed registers used for condition codes.  */

static bool
s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = CC_REGNUM;
  *p2 = INVALID_REGNUM;

  return true;
}
/* This function is used by the call expanders of the machine description.
   It emits the call insn itself together with the necessary operations
   to adjust the target address and returns the emitted insn.
   ADDR_LOCATION is the target address rtx
   TLS_CALL the location of the thread-local symbol
   RESULT_REG the register where the result of the call should be stored
   RETADDR_REG the register where the return address should be stored
	       If this parameter is NULL_RTX the call is considered
	       to be a sibling call.  */

rtx_insn *
s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
		rtx retaddr_reg)
{
  bool plt_call = false;
  rtx_insn *insn;
  rtx call;
  rtx clobber;
  rtvec vec;

  /* Direct function calls need special treatment.  */
  if (GET_CODE (addr_location) == SYMBOL_REF)
    {
      /* When calling a global routine in PIC mode, we must
	 replace the symbol itself with the PLT stub.  */
      if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
	{
	  if (retaddr_reg != NULL_RTX)
	    {
	      addr_location = gen_rtx_UNSPEC (Pmode,
					      gen_rtvec (1, addr_location),
					      UNSPEC_PLT);
	      addr_location = gen_rtx_CONST (Pmode, addr_location);
	      plt_call = true;
	    }
	  else
	    /* For -fpic code the PLT entries might use r12 which is
	       call-saved.  Therefore we cannot do a sibcall when
	       calling directly using a symbol ref.  When reaching
	       this point we decided (in s390_function_ok_for_sibcall)
	       to do a sibcall for a function pointer but one of the
	       optimizers was able to get rid of the function pointer
	       by propagating the symbol ref into the call.  This
	       optimization is illegal for S/390 so we turn the direct
	       call into an indirect call again.  */
	    addr_location = force_reg (Pmode, addr_location);
	}

      /* Unless we can use the bras(l) insn, force the
	 routine address into a register.  */
      if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
	{
	  if (flag_pic)
	    addr_location = legitimize_pic_address (addr_location, 0);
	  else
	    addr_location = force_reg (Pmode, addr_location);
	}
    }

  /* If it is already an indirect call or the code above moved the
     SYMBOL_REF to somewhere else make sure the address can be found in
     register 1.  */
  if (retaddr_reg == NULL_RTX
      && GET_CODE (addr_location) != SYMBOL_REF
      && !plt_call)
    {
      emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
      addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
    }

  addr_location = gen_rtx_MEM (QImode, addr_location);
  call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);

  if (result_reg != NULL_RTX)
    call = gen_rtx_SET (VOIDmode, result_reg, call);

  if (retaddr_reg != NULL_RTX)
    {
      clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);

      if (tls_call != NULL_RTX)
	vec = gen_rtvec (3, call, clobber,
			 gen_rtx_USE (VOIDmode, tls_call));
      else
	vec = gen_rtvec (2, call, clobber);

      call = gen_rtx_PARALLEL (VOIDmode, vec);
    }

  insn = emit_call_insn (call);

  /* 31-bit PLT stubs and tls calls use the GOT register implicitly.  */
  if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
    {
      /* s390_function_ok_for_sibcall should
	 have denied sibcalls in this case.  */
      gcc_assert (retaddr_reg != NULL_RTX);
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
    }
  return insn;
}
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */

static void
s390_conditional_register_usage (void)
{
  int i;

  if (flag_pic)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  if (TARGET_CPU_ZARCH)
    {
      fixed_regs[BASE_REGNUM] = 0;
      call_used_regs[BASE_REGNUM] = 0;
      fixed_regs[RETURN_REGNUM] = 0;
      call_used_regs[RETURN_REGNUM] = 0;
    }
  if (TARGET_64BIT)
    {
      for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
	call_used_regs[i] = call_really_used_regs[i] = 0;
    }
  else
    {
      call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
      call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
    }

  if (TARGET_SOFT_FLOAT)
    {
      for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
	call_used_regs[i] = fixed_regs[i] = 1;
    }
}
/* Corresponding function to eh_return expander.  */

static GTY(()) rtx s390_tpf_eh_return_symbol;
void
s390_emit_tpf_eh_return (rtx target)
{
  rtx_insn *insn;
  rtx reg, orig_ra;

  if (!s390_tpf_eh_return_symbol)
    s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");

  reg = gen_rtx_REG (Pmode, 2);
  orig_ra = gen_rtx_REG (Pmode, 3);

  emit_move_insn (reg, target);
  emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
  insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
			 gen_rtx_REG (Pmode, RETURN_REGNUM));
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);

  emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
}
/* Rework the prologue/epilogue to avoid saving/restoring
   registers unnecessarily.  */

static void
s390_optimize_prologue (void)
{
  rtx_insn *insn, *new_insn, *next_insn;

  /* Do a final recompute of the frame-related data.  */
  s390_optimize_register_info ();

  /* If all special registers are in fact used, there's nothing we
     can do, so no point in walking the insn list.  */

  if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
      && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
      && (TARGET_CPU_ZARCH
	  || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
	      && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
    return;

  /* Search for prologue/epilogue insns and replace them.  */

  for (insn = get_insns (); insn; insn = next_insn)
    {
      int first, last, off;
      rtx set, base, offset;
      rtx pat;

      next_insn = NEXT_INSN (insn);

      if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
	continue;

      pat = PATTERN (insn);

      /* Remove ldgr/lgdr instructions used for saving and restore
	 GPRs if possible.  */
      if (TARGET_Z10
	  && GET_CODE (pat) == SET
	  && GET_MODE (SET_SRC (pat)) == DImode
	  && REG_P (SET_SRC (pat))
	  && REG_P (SET_DEST (pat)))
	{
	  int src_regno = REGNO (SET_SRC (pat));
	  int dest_regno = REGNO (SET_DEST (pat));
	  int gpr_regno;
	  int fpr_regno;

	  if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
		|| (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
	    continue;

	  gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
	  fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;

	  /* GPR must be call-saved, FPR must be call-clobbered.  */
	  if (!call_really_used_regs[fpr_regno]
	      || call_really_used_regs[gpr_regno])
	    continue;

	  /* It must not happen that what we once saved in an FPR now
	     needs a stack slot.  */
	  gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);

	  if (cfun_gpr_save_slot (gpr_regno) == 0)
	    {
	      remove_insn (insn);
	      continue;
	    }
	}

      if (GET_CODE (pat) == PARALLEL
	  && store_multiple_operation (pat, VOIDmode))
	{
	  set = XVECEXP (pat, 0, 0);
	  first = REGNO (SET_SRC (set));
	  last = first + XVECLEN (pat, 0) - 1;
	  offset = const0_rtx;
	  base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
	  off = INTVAL (offset);

	  if (GET_CODE (base) != REG || off < 0)
	    continue;
	  if (cfun_frame_layout.first_save_gpr != -1
	      && (cfun_frame_layout.first_save_gpr < first
		  || cfun_frame_layout.last_save_gpr > last))
	    continue;
	  if (REGNO (base) != STACK_POINTER_REGNUM
	      && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
	    continue;
	  if (first > BASE_REGNUM || last < BASE_REGNUM)
	    continue;

	  if (cfun_frame_layout.first_save_gpr != -1)
	    {
	      rtx s_pat = save_gprs (base,
				     off + (cfun_frame_layout.first_save_gpr
					    - first) * UNITS_PER_LONG,
				     cfun_frame_layout.first_save_gpr,
				     cfun_frame_layout.last_save_gpr);
	      new_insn = emit_insn_before (s_pat, insn);
	      INSN_ADDRESSES_NEW (new_insn, -1);
	    }

	  remove_insn (insn);
	  continue;
	}

      if (cfun_frame_layout.first_save_gpr == -1
	  && GET_CODE (pat) == SET
	  && GENERAL_REG_P (SET_SRC (pat))
	  && GET_CODE (SET_DEST (pat)) == MEM)
	{
	  set = pat;
	  first = REGNO (SET_SRC (set));
	  offset = const0_rtx;
	  base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
	  off = INTVAL (offset);

	  if (GET_CODE (base) != REG || off < 0)
	    continue;
	  if (REGNO (base) != STACK_POINTER_REGNUM
	      && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
	    continue;

	  remove_insn (insn);
	  continue;
	}

      if (GET_CODE (pat) == PARALLEL
	  && load_multiple_operation (pat, VOIDmode))
	{
	  set = XVECEXP (pat, 0, 0);
	  first = REGNO (SET_DEST (set));
	  last = first + XVECLEN (pat, 0) - 1;
	  offset = const0_rtx;
	  base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
	  off = INTVAL (offset);

	  if (GET_CODE (base) != REG || off < 0)
	    continue;

	  if (cfun_frame_layout.first_restore_gpr != -1
	      && (cfun_frame_layout.first_restore_gpr < first
		  || cfun_frame_layout.last_restore_gpr > last))
	    continue;
	  if (REGNO (base) != STACK_POINTER_REGNUM
	      && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
	    continue;
	  if (first > BASE_REGNUM || last < BASE_REGNUM)
	    continue;

	  if (cfun_frame_layout.first_restore_gpr != -1)
	    {
	      rtx rpat = restore_gprs (base,
				       off + (cfun_frame_layout.first_restore_gpr
					      - first) * UNITS_PER_LONG,
				       cfun_frame_layout.first_restore_gpr,
				       cfun_frame_layout.last_restore_gpr);

	      /* Remove REG_CFA_RESTOREs for registers that we no
		 longer need to save.  */
	      REG_NOTES (rpat) = REG_NOTES (insn);
	      for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
		if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
		    && ((int) REGNO (XEXP (*ptr, 0))
			< cfun_frame_layout.first_restore_gpr))
		  *ptr = XEXP (*ptr, 1);
		else
		  ptr = &XEXP (*ptr, 1);
	      new_insn = emit_insn_before (rpat, insn);
	      RTX_FRAME_RELATED_P (new_insn) = 1;
	      INSN_ADDRESSES_NEW (new_insn, -1);
	    }

	  remove_insn (insn);
	  continue;
	}

      if (cfun_frame_layout.first_restore_gpr == -1
	  && GET_CODE (pat) == SET
	  && GENERAL_REG_P (SET_DEST (pat))
	  && GET_CODE (SET_SRC (pat)) == MEM)
	{
	  set = pat;
	  first = REGNO (SET_DEST (set));
	  offset = const0_rtx;
	  base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
	  off = INTVAL (offset);

	  if (GET_CODE (base) != REG || off < 0)
	    continue;

	  if (REGNO (base) != STACK_POINTER_REGNUM
	      && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
	    continue;

	  remove_insn (insn);
	  continue;
	}
    }
}
/* On z10 and later the dynamic branch prediction must see the
   backward jump within a certain window.  If not it falls back to
   the static prediction.  This function rearranges the loop backward
   branch in a way which makes the static prediction always correct.
   The function returns true if it added an instruction.  */
static bool
s390_fix_long_loop_prediction (rtx_insn *insn)
{
  rtx set = single_set (insn);
  rtx code_label, label_ref, new_label;
  rtx_insn *uncond_jump;
  rtx_insn *cur_insn;
  rtx tmp;
  int distance;

  /* This will exclude branch on count and branch on index patterns
     since these are correctly statically predicted.  */
  if (!set
      || SET_DEST (set) != pc_rtx
      || GET_CODE (SET_SRC (set)) != IF_THEN_ELSE)
    return false;

  /* Skip conditional returns.  */
  if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
      && XEXP (SET_SRC (set), 2) == pc_rtx)
    return false;

  label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
	       XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));

  gcc_assert (GET_CODE (label_ref) == LABEL_REF);

  code_label = XEXP (label_ref, 0);

  if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
      || INSN_ADDRESSES (INSN_UID (insn)) == -1
      || (INSN_ADDRESSES (INSN_UID (insn))
	  - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
    return false;

  for (distance = 0, cur_insn = PREV_INSN (insn);
       distance < PREDICT_DISTANCE - 6;
       distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
    if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
      return false;

  new_label = gen_label_rtx ();
  uncond_jump = emit_jump_insn_after (
		  gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_LABEL_REF (VOIDmode, code_label)),
		  insn);
  emit_label_after (new_label, uncond_jump);

  tmp = XEXP (SET_SRC (set), 1);
  XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
  XEXP (SET_SRC (set), 2) = tmp;
  INSN_CODE (insn) = -1;

  XEXP (label_ref, 0) = new_label;
  JUMP_LABEL (insn) = new_label;
  JUMP_LABEL (uncond_jump) = code_label;

  return true;
}
/* Returns 1 if INSN reads the value of REG for purposes not related
   to addressing of memory, and 0 otherwise.  */
static int
s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
{
  return reg_referenced_p (reg, PATTERN (insn))
	 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
}
/* Starting from INSN find_cond_jump looks downwards in the insn
   stream for a single jump insn which is the last user of the
   condition code set in INSN.  */
static rtx_insn *
find_cond_jump (rtx_insn *insn)
{
  for (; insn; insn = NEXT_INSN (insn))
    {
      rtx ite, cc;

      if (LABEL_P (insn))
	break;

      if (!JUMP_P (insn))
	{
	  if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
	    break;
	  continue;
	}

      /* This will be triggered by a return.  */
      if (GET_CODE (PATTERN (insn)) != SET)
	break;

      gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
      ite = SET_SRC (PATTERN (insn));

      if (GET_CODE (ite) != IF_THEN_ELSE)
	break;

      cc = XEXP (XEXP (ite, 0), 0);
      if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
	break;

      if (find_reg_note (insn, REG_DEAD, cc))
	return insn;
      break;
    }

  return NULL;
}
/* Swap the condition in COND and the operands in OP0 and OP1 so that
   the semantics does not change.  If NULL_RTX is passed as COND the
   function tries to find the conditional jump starting with INSN.  */
static void
s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
{
  rtx tmp;

  if (cond == NULL_RTX)
    {
      rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
      rtx set = jump ? single_set (jump) : NULL_RTX;

      if (set == NULL_RTX)
	return;

      cond = XEXP (SET_SRC (set), 0);
    }

  tmp = *op0;
  *op0 = *op1;
  *op1 = tmp;
  PUT_CODE (cond, swap_condition (GET_CODE (cond)));
}
/* On z10, instructions of the compare-and-branch family have the
   property to access the register occurring as second operand with
   its bits complemented.  If such a compare is grouped with a second
   instruction that accesses the same register non-complemented, and
   if that register's value is delivered via a bypass, then the
   pipeline recycles, thereby causing significant performance decline.
   This function locates such situations and exchanges the two
   operands of the compare.  The function returns true whenever it
   added an insn.  */
static bool
s390_z10_optimize_cmp (rtx_insn *insn)
{
  rtx_insn *prev_insn, *next_insn;
  bool insn_added_p = false;
  rtx cond, *op0, *op1;

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      /* Handle compare and branch and branch on count
	 instructions.  */
      rtx pattern = single_set (insn);

      if (!pattern
	  || SET_DEST (pattern) != pc_rtx
	  || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
	return false;

      cond = XEXP (SET_SRC (pattern), 0);
      op0 = &XEXP (cond, 0);
      op1 = &XEXP (cond, 1);
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx src, dest;

      /* Handle normal compare instructions.  */
      src = SET_SRC (PATTERN (insn));
      dest = SET_DEST (PATTERN (insn));

      if (!REG_P (dest)
	  || !CC_REGNO_P (REGNO (dest))
	  || GET_CODE (src) != COMPARE)
	return false;

      /* s390_swap_cmp will try to find the conditional
	 jump when passing NULL_RTX as condition.  */
      cond = NULL_RTX;
      op0 = &XEXP (src, 0);
      op1 = &XEXP (src, 1);
    }
  else
    return false;

  if (!REG_P (*op0) || !REG_P (*op1))
    return false;

  if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
    return false;

  /* Swap the COMPARE arguments and its mask if there is a
     conflicting access in the previous insn.  */
  prev_insn = prev_active_insn (insn);
  if (prev_insn != NULL_RTX && INSN_P (prev_insn)
      && reg_referenced_p (*op1, PATTERN (prev_insn)))
    s390_swap_cmp (cond, op0, op1, insn);

  /* Check if there is a conflict with the next insn.  If there
     was no conflict with the previous insn, then swap the
     COMPARE arguments and its mask.  If we already swapped
     the operands, or if swapping them would cause a conflict
     with the previous insn, issue a NOP after the COMPARE in
     order to separate the two instructions.  */
  next_insn = next_active_insn (insn);
  if (next_insn != NULL_RTX && INSN_P (next_insn)
      && s390_non_addr_reg_read_p (*op1, next_insn))
    {
      if (prev_insn != NULL_RTX && INSN_P (prev_insn)
	  && s390_non_addr_reg_read_p (*op0, prev_insn))
	{
	  if (REGNO (*op1) == 0)
	    emit_insn_after (gen_nop1 (), insn);
	  else
	    emit_insn_after (gen_nop (), insn);
	  insn_added_p = true;
	}
      else
	s390_swap_cmp (cond, op0, op1, insn);
    }
  return insn_added_p;
}
/* Perform machine-dependent processing.  */

static void
s390_reorg (void)
{
  bool pool_overflow = false;

  /* Make sure all splits have been performed; splits after
     machine_dependent_reorg might confuse insn length counts.  */
  split_all_insns_noflow ();

  /* Install the main literal pool and the associated base
     register load insns.

     In addition, there are two problematic situations we need
     to correct:

     - the literal pool might be > 4096 bytes in size, so that
       some of its elements cannot be directly accessed

     - a branch target might be > 64K away from the branch, so that
       it is not possible to use a PC-relative instruction.

     To fix those, we split the single literal pool into multiple
     pool chunks, reloading the pool base register at various
     points throughout the function to ensure it always points to
     the pool chunk the following code expects, and / or replace
     PC-relative branches by absolute branches.

     However, the two problems are interdependent: splitting the
     literal pool can move a branch further away from its target,
     causing the 64K limit to overflow, and on the other hand,
     replacing a PC-relative branch by an absolute branch means
     we need to put the branch target address into the literal
     pool, possibly causing it to overflow.

     So, we loop trying to fix up both problems until we manage
     to satisfy both conditions at the same time.  Note that the
     loop is guaranteed to terminate as every pass of the loop
     strictly decreases the total number of PC-relative branches
     in the function.  (This is not completely true as there
     might be branch-over-pool insns introduced by chunkify_start.
     Those never need to be split however.)  */
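  /* As a hypothetical illustration: a pool that has grown past 4096
     bytes forces chunkifying; the base-register reloads this inserts
     may push some branch beyond the 64K range, so that branch is
     replaced by an absolute branch whose target address becomes one
     more literal, and the next iteration rechecks both limits.  Each
     such replacement removes a PC-relative branch, which is what
     bounds the number of iterations.  */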
  for (;;)
    {
      struct constant_pool *pool = NULL;

      /* Collect the literal pool.  */
      if (!pool_overflow)
        {
          pool = s390_mainpool_start ();
          if (!pool)
            pool_overflow = true;
        }

      /* If literal pool overflowed, start to chunkify it.  */
      if (pool_overflow)
        pool = s390_chunkify_start ();

      /* Split out-of-range branches.  If this has created new
         literal pool entries, cancel current chunk list and
         recompute it.  zSeries machines have large branch
         instructions, so we never need to split a branch.  */
      if (!TARGET_CPU_ZARCH && s390_split_branches ())
        {
          if (pool_overflow)
            s390_chunkify_cancel (pool);
          else
            s390_mainpool_cancel (pool);

          continue;
        }

      /* If we made it up to here, both conditions are satisfied.
         Finish up literal pool related changes.  */
      if (pool_overflow)
        s390_chunkify_finish (pool);
      else
        s390_mainpool_finish (pool);

      /* We're done splitting branches.  */
      cfun->machine->split_branches_pending_p = false;
      break;
    }

  /* Generate out-of-pool execute target insns.  */
  if (TARGET_CPU_ZARCH)
    {
      rtx_insn *insn, *target;
      rtx label;

      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          label = s390_execute_label (insn);
          if (label)
            {
              gcc_assert (label != const0_rtx);

              target = emit_label (XEXP (label, 0));
              INSN_ADDRESSES_NEW (target, -1);

              target = emit_insn (s390_execute_target (insn));
              INSN_ADDRESSES_NEW (target, -1);
            }
        }
    }

  /* Try to optimize prologue and epilogue further.  */
  s390_optimize_prologue ();

  /* Walk over the insns and do some >= z10 specific changes.  */
  if (s390_tune == PROCESSOR_2097_Z10
      || s390_tune == PROCESSOR_2817_Z196
      || s390_tune == PROCESSOR_2827_ZEC12)
    {
      rtx_insn *insn;
      bool insn_added_p = false;

      /* The insn lengths and addresses have to be up to date for the
         following manipulations.  */
      shorten_branches (get_insns ());

      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
            continue;

          if (JUMP_P (insn))
            insn_added_p |= s390_fix_long_loop_prediction (insn);

          if ((GET_CODE (PATTERN (insn)) == PARALLEL
               || GET_CODE (PATTERN (insn)) == SET)
              && s390_tune == PROCESSOR_2097_Z10)
            insn_added_p |= s390_z10_optimize_cmp (insn);
        }

      /* Adjust branches if we added new instructions.  */
      if (insn_added_p)
        shorten_branches (get_insns ());
    }
}
/* Return true if INSN is a fp load insn writing register REGNO.  */
static bool
s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
{
  rtx set;
  enum attr_type flag = s390_safe_attr_type (insn);

  if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
    return false;

  set = single_set (insn);

  if (set == NULL_RTX)
    return false;

  if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
    return false;

  if (REGNO (SET_DEST (set)) != regno)
    return false;

  return true;
}
/* This value describes the distance to be avoided between an
   arithmetic fp instruction and an fp load writing the same register.
   Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
   fine but the exact value has to be avoided.  Otherwise the FP
   pipeline will throw an exception causing a major penalty.  */
#define Z10_EARLYLOAD_DISTANCE 7
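/* Hypothetical illustration: with the value 7 above, an fp load of
   %f2 issued exactly seven insns after an arithmetic fp instruction
   using %f2 hits the penalty case, while a distance of six or eight
   insns is harmless.  The function below therefore defers such a
   load by moving it to the very end of the ready list.  */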
/* Rearrange the ready list in order to avoid the situation described
   for Z10_EARLYLOAD_DISTANCE.  A problematic load instruction is
   moved to the very end of the ready list.  */
static void
s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
{
  unsigned int regno;
  int nready = *nready_p;
  rtx_insn *tmp;
  int i;
  rtx_insn *insn;
  rtx set;
  enum attr_type flag;
  int distance;

  /* Skip DISTANCE - 1 active insns.  */
  for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
       distance > 0 && insn != NULL_RTX;
       distance--, insn = prev_active_insn (insn))
    if (CALL_P (insn) || JUMP_P (insn))
      return;

  if (insn == NULL_RTX)
    return;

  set = single_set (insn);

  if (set == NULL_RTX || !REG_P (SET_DEST (set))
      || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
    return;

  flag = s390_safe_attr_type (insn);

  if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
    return;

  regno = REGNO (SET_DEST (set));
  i = nready - 1;

  while (!s390_fpload_toreg (ready[i], regno) && i > 0)
    i--;

  if (!i)
    return;

  tmp = ready[i];
  memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
  ready[0] = tmp;
}
/* The s390_sched_state variable tracks the state of the current or
   the last instruction group.

   0,1,2 number of instructions scheduled in the current group
   3     the last group is complete - normal insns
   4     the last group was a cracked/expanded insn */

static int s390_sched_state;

#define S390_OOO_SCHED_STATE_NORMAL  3
#define S390_OOO_SCHED_STATE_CRACKED 4

#define S390_OOO_SCHED_ATTR_MASK_CRACKED    0x1
#define S390_OOO_SCHED_ATTR_MASK_EXPANDED   0x2
#define S390_OOO_SCHED_ATTR_MASK_ENDGROUP   0x4
#define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
static unsigned int
s390_get_sched_attrmask (rtx_insn *insn)
{
  unsigned int mask = 0;

  if (get_attr_ooo_cracked (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
  if (get_attr_ooo_expanded (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
  if (get_attr_ooo_endgroup (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
  if (get_attr_ooo_groupalone (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
  return mask;
}
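/* Example: for a hypothetical insn whose ooo_cracked and ooo_endgroup
   attributes are both set, the returned mask combines to
   S390_OOO_SCHED_ATTR_MASK_CRACKED | S390_OOO_SCHED_ATTR_MASK_ENDGROUP,
   i.e. 0x1 | 0x4 == 0x5.  */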
/* Return the scheduling score for INSN.  The higher the score the
   better.  The score is calculated from the OOO scheduling attributes
   of INSN and the scheduling state s390_sched_state.  */
static int
s390_sched_score (rtx_insn *insn)
{
  unsigned int mask = s390_get_sched_attrmask (insn);
  int score = 0;

  switch (s390_sched_state)
    {
    case 0:
      /* Try to put insns into the first slot which would otherwise
         break a group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        score += 5;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        score += 10;
    case 1:
      /* Prefer not cracked insns while trying to put together a
         group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
        score += 10;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
        score += 5;
      break;
    case 2:
      /* Prefer not cracked insns while trying to put together a
         group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
        score += 10;
      /* Prefer endgroup insns in the last slot.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
        score += 10;
      break;
    case S390_OOO_SCHED_STATE_NORMAL:
      /* Prefer not cracked insns if the last was not cracked.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
        score += 5;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        score += 10;
      break;
    case S390_OOO_SCHED_STATE_CRACKED:
      /* Try to keep cracked insns together to prevent them from
         interrupting groups.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        score += 5;
      break;
    }
  return score;
}
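/* Illustrative reading of the scoring (hypothetical insns): while a
   group is being assembled (states 0-2), an insn with none of the OOO
   attribute bits set outscores one that would crack or monopolize the
   group, and an endgroup insn is steered towards the last slot; right
   after a cracked insn (state S390_OOO_SCHED_STATE_CRACKED), further
   cracked or expanded insns are favoured so they do not interrupt the
   groups that follow.  */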
/* This function is called via hook TARGET_SCHED_REORDER before
   issuing one insn from list READY which contains *NREADYP entries.
   For target z10 it reorders load instructions to avoid early load
   conflicts in the floating point pipeline.  */
static int
s390_sched_reorder (FILE *file, int verbose,
                    rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
{
  if (s390_tune == PROCESSOR_2097_Z10)
    if (reload_completed && *nreadyp > 1)
      s390_z10_prevent_earlyload_conflicts (ready, nreadyp);

  if (s390_tune == PROCESSOR_2827_ZEC12
      && reload_completed
      && *nreadyp > 1)
    {
      int i;
      int last_index = *nreadyp - 1;
      int max_index = -1;
      int max_score = -1;
      rtx_insn *tmp;

      /* Just move the insn with the highest score to the top (the
         end) of the list.  A full sort is not needed since a conflict
         in the hazard recognition cannot happen.  So the top insn in
         the ready list will always be taken.  */
      for (i = last_index; i >= 0; i--)
        {
          int score;

          if (recog_memoized (ready[i]) < 0)
            continue;

          score = s390_sched_score (ready[i]);
          if (score > max_score)
            {
              max_score = score;
              max_index = i;
            }
        }

      if (max_index != -1)
        {
          if (max_index != last_index)
            {
              tmp = ready[max_index];
              ready[max_index] = ready[last_index];
              ready[last_index] = tmp;

              if (verbose > 5)
                fprintf (file,
                         "move insn %d to the top of list\n",
                         INSN_UID (ready[last_index]));
            }
          else if (verbose > 5)
            fprintf (file,
                     "best insn %d already on top\n",
                     INSN_UID (ready[last_index]));
        }

      if (verbose > 5)
        {
          fprintf (file, "ready list ooo attributes - sched state: %d\n",
                   s390_sched_state);

          for (i = last_index; i >= 0; i--)
            {
              if (recog_memoized (ready[i]) < 0)
                continue;
              fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
                       s390_sched_score (ready[i]));
#define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
              PRINT_OOO_ATTR (ooo_cracked);
              PRINT_OOO_ATTR (ooo_expanded);
              PRINT_OOO_ATTR (ooo_endgroup);
              PRINT_OOO_ATTR (ooo_groupalone);
#undef PRINT_OOO_ATTR
              fprintf (file, "\n");
            }
        }
    }

  return s390_issue_rate ();
}
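/* Worked example (hypothetical ready list): with three recognized
   insns scoring {A:3, B:7, C:5} and C currently at the last index,
   B is swapped into the last slot, so the scheduler issues B next;
   A and C keep their relative order.  */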
/* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
   the scheduler has issued INSN.  It stores the last issued insn into
   last_scheduled_insn in order to make it available for
   s390_sched_reorder.  */
static int
s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
{
  last_scheduled_insn = insn;

  if (s390_tune == PROCESSOR_2827_ZEC12
      && reload_completed
      && recog_memoized (insn) >= 0)
    {
      unsigned int mask = s390_get_sched_attrmask (insn);

      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
      else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
               || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
      else
        {
          /* Only normal insns are left (mask == 0).  */
          switch (s390_sched_state)
            {
            case 0:
            case 1:
            case 2:
            case S390_OOO_SCHED_STATE_NORMAL:
              if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
                s390_sched_state = 1;
              else
                s390_sched_state++;

              break;
            case S390_OOO_SCHED_STATE_CRACKED:
              s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
              break;
            }
        }

      if (verbose > 5)
        {
          fprintf (file, "insn %d: ", INSN_UID (insn));
#define PRINT_OOO_ATTR(ATTR)						\
          fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
          PRINT_OOO_ATTR (ooo_cracked);
          PRINT_OOO_ATTR (ooo_expanded);
          PRINT_OOO_ATTR (ooo_endgroup);
          PRINT_OOO_ATTR (ooo_groupalone);
#undef PRINT_OOO_ATTR
          fprintf (file, "\n");
          fprintf (file, "sched state: %d\n", s390_sched_state);
        }
    }

  if (GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    return more - 1;
  else
    return more;
}
static void
s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
                 int verbose ATTRIBUTE_UNUSED,
                 int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  s390_sched_state = 0;
}
/* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST
   calculates the number of times struct loop *loop should be unrolled
   when tuned for cpus with a built-in stride prefetcher.
   The loop is analyzed for memory accesses by walking every rtx of
   the loop body.  Depending on the loop_depth and the amount of
   memory accesses a new number <= nunroll is returned to improve the
   behaviour of the hardware prefetch unit.  */
static unsigned
s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
{
  basic_block *bbs;
  rtx_insn *insn;
  unsigned i;
  unsigned mem_count = 0;

  if (s390_tune != PROCESSOR_2097_Z10
      && s390_tune != PROCESSOR_2817_Z196
      && s390_tune != PROCESSOR_2827_ZEC12)
    return nunroll;

  /* Count the number of memory references within the loop body.  */
  bbs = get_loop_body (loop);
  subrtx_iterator::array_type array;
  for (i = 0; i < loop->num_nodes; i++)
    FOR_BB_INSNS (bbs[i], insn)
      if (INSN_P (insn) && INSN_CODE (insn) != -1)
        FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
          if (MEM_P (*iter))
            mem_count += 1;
  free (bbs);

  /* Prevent division by zero; no adjustment is needed in this case.  */
  if (mem_count == 0)
    return nunroll;

  switch (loop_depth (loop))
    {
    case 1:
      return MIN (nunroll, 28 / mem_count);
    case 2:
      return MIN (nunroll, 22 / mem_count);
    default:
      return MIN (nunroll, 16 / mem_count);
    }
}
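/* Worked example: for a depth-1 loop body containing four MEM
   references, the hook returns MIN (nunroll, 28 / 4), i.e.
   MIN (nunroll, 7), so a requested unroll factor of 8 is reduced to
   7; deeper nests get the smaller budgets (22 and 16) above,
   presumably to keep the number of concurrent reference streams
   within what the stride prefetcher handles well.  */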
static void
s390_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v =
    (vec<cl_deferred_option> *) s390_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mhotpatch_:
            {
              int val1;
              int val2;
              char s[256];
              char *t;

              strncpy (s, opt->arg, 256);
              s[255] = 0;
              t = strchr (s, ',');
              if (t != NULL)
                {
                  *t = 0;
                  t++;
                  val1 = integral_argument (s);
                  val2 = integral_argument (t);
                }
              else
                {
                  val1 = -1;
                  val2 = -1;
                }
              if (val1 == -1 || val2 == -1)
                {
                  /* The argument is not a plain number.  */
                  error ("arguments to %qs should be non-negative integers",
                         "-mhotpatch=n,m");
                  break;
                }
              else if (val1 > s390_hotpatch_hw_max
                       || val2 > s390_hotpatch_hw_max)
                {
                  error ("argument to %qs is too large (max. %d)",
                         "-mhotpatch=n,m", s390_hotpatch_hw_max);
                  break;
                }
              s390_hotpatch_hw_before_label = val1;
              s390_hotpatch_hw_after_label = val2;
              break;
            }
          default:
            gcc_unreachable ();
          }
      }
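  /* Example usage (hypothetical command line): -mhotpatch=12,2 parses
     into val1 == 12 and val2 == 2, i.e. twelve halfwords of hotpatch
     padding before the function label and two after it, provided both
     values stay within s390_hotpatch_hw_max.  */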
  /* Set up function hooks.  */
  init_machine_status = s390_init_machine_status;

  /* Architecture mode defaults according to ABI.  */
  if (!(target_flags_explicit & MASK_ZARCH))
    {
      if (TARGET_64BIT)
        target_flags |= MASK_ZARCH;
      else
        target_flags &= ~MASK_ZARCH;
    }

  /* Set the march default in case it hasn't been specified on
     cmdline.  */
  if (s390_arch == PROCESSOR_max)
    {
      s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
      s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
      s390_arch_flags = processor_flags_table[(int) s390_arch];
    }

  /* Determine processor to tune for.  */
  if (s390_tune == PROCESSOR_max)
    {
      s390_tune = s390_arch;
      s390_tune_flags = s390_arch_flags;
    }

  /* Sanity checks.  */
  if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
    error ("z/Architecture mode not supported on %s", s390_arch_string);
  if (TARGET_64BIT && !TARGET_ZARCH)
    error ("64-bit ABI not supported in ESA/390 mode");

  /* Use hardware DFP if available and not explicitly disabled by
     user.  E.g. with -m31 -march=z10 -mzarch.  */
  if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
    target_flags |= MASK_HARD_DFP;

  /* Enable hardware transactions if available and not explicitly
     disabled by user.  E.g. with -m31 -march=zEC12 -mzarch.  */
  if (!(target_flags_explicit & MASK_OPT_HTM) && TARGET_CPU_HTM && TARGET_ZARCH)
    target_flags |= MASK_OPT_HTM;

  if (TARGET_HARD_DFP && !TARGET_DFP)
    {
      if (target_flags_explicit & MASK_HARD_DFP)
        {
          if (!TARGET_CPU_DFP)
            error ("hardware decimal floating point instructions"
                   " not available on %s", s390_arch_string);
          if (!TARGET_ZARCH)
            error ("hardware decimal floating point instructions"
                   " not available in ESA/390 mode");
        }
      else
        target_flags &= ~MASK_HARD_DFP;
    }

  if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
    {
      if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
        error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");

      target_flags &= ~MASK_HARD_DFP;
    }
  /* Set processor cost function.  */
  switch (s390_tune)
    {
    case PROCESSOR_2084_Z990:
      s390_cost = &z990_cost;
      break;
    case PROCESSOR_2094_Z9_109:
      s390_cost = &z9_109_cost;
      break;
    case PROCESSOR_2097_Z10:
      s390_cost = &z10_cost;
      break;
    case PROCESSOR_2817_Z196:
      s390_cost = &z196_cost;
      break;
    case PROCESSOR_2827_ZEC12:
      s390_cost = &zEC12_cost;
      break;
    default:
      s390_cost = &z900_cost;
    }

  if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
    error ("-mbackchain -mpacked-stack -mhard-float are not supported "
           "in combination");

  if (s390_stack_size)
    {
      if (s390_stack_guard >= s390_stack_size)
        error ("stack size must be greater than the stack guard value");
      else if (s390_stack_size > 1 << 16)
        error ("stack size must not be greater than 64k");
    }
  else if (s390_stack_guard)
    error ("-mstack-guard implies use of -mstack-size");
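  /* Example (hypothetical command lines): -mstack-size=32768
     -mstack-guard=4096 is accepted, whereas -mstack-size=4096
     -mstack-guard=4096 trips the first error above (guard must be
     smaller than the size) and -mstack-size=131072 trips the second
     (the limit is 64k).  */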
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (s390_tune == PROCESSOR_2097_Z10
      || s390_tune == PROCESSOR_2817_Z196
      || s390_tune == PROCESSOR_2827_ZEC12)
    {
      maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
    }

  maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* values for loop prefetching */
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* s390 has more than 2 levels and the size is much larger.  Since
     we are always running virtualized, assume that we only get a small
     part of the caches above L1.  */
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
     requires the arch flags to be evaluated already.  Since prefetching
     is beneficial on s390, we enable it if available.  */
  if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
    flag_prefetch_loop_arrays = 1;

  /* Use the alternative scheduling-pressure algorithm by default.  */
  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  if (TARGET_TPF)
    {
      /* Don't emit DWARF3/4 unless specifically selected.  The TPF
         debuggers do not yet support DWARF 3/4.  */
      if (!global_options_set.x_dwarf_strict)
        dwarf_strict = 1;
      if (!global_options_set.x_dwarf_version)
        dwarf_version = 2;
    }

  /* Register a target-specific optimization-and-lowering pass
     to run immediately before prologue and epilogue generation.

     Registering the pass must be done at start up.  It's
     convenient to do it here.  */
  opt_pass *new_pass = new pass_s390_early_mach (g);
  struct register_pass_info insert_pass_s390_early_mach =
    {
      new_pass,			/* pass */
      "pro_and_epilogue",	/* reference_pass_name */
      1,			/* ref_pass_instance_number */
      PASS_POS_INSERT_BEFORE	/* pos_op */
    };
  register_pass (&insert_pass_s390_early_mach);
}
/* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P.  */

static bool
s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
                                     unsigned int align ATTRIBUTE_UNUSED,
                                     enum by_pieces_operation op ATTRIBUTE_UNUSED,
                                     bool speed_p ATTRIBUTE_UNUSED)
{
  return (size == 1 || size == 2
          || size == 4 || (TARGET_ZARCH && size == 8));
}
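/* For example, an 8-byte piece can be moved "by pieces" only when
   TARGET_ZARCH provides 64-bit general registers; on a 31-bit
   ESA/390 target the hook returns false for size 8, and the middle
   end is then typically left to use the target's block-move patterns
   or a library call instead.  */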
/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#ifdef ASM_OUTPUT_DWARF_DTPREL
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  s390_use_by_pieces_infrastructure_p

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"