/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
   2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "basic-block.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
const struct processor_costs *s390_cost;
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR */
  COSTS_N_INSNS (30),    /* DDBR */
  COSTS_N_INSNS (27),    /* DEBR */
  COSTS_N_INSNS (220),   /* DLGR */
  COSTS_N_INSNS (34),    /* DLR */
  COSTS_N_INSNS (34),    /* DR */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR */
};
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (176),   /* DLGR */
  COSTS_N_INSNS (31),    /* DLR */
  COSTS_N_INSNS (31),    /* DR */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR */
};
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (30),    /* DLGR */
  COSTS_N_INSNS (23),    /* DLR */
  COSTS_N_INSNS (23),    /* DR */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR */
};
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR */
  COSTS_N_INSNS (39),    /* DDBR */
  COSTS_N_INSNS (32),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR */
  COSTS_N_INSNS (71),    /* DLR */
  COSTS_N_INSNS (71),    /* DR */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR */
};
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (101),   /* DXBR B+101 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
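/* The cost table matching the processor being tuned for is installed into
   s390_cost by s390_option_override further below: z990, z9-109, z10 and
   z196 get their own tables, everything else falls back to the z900 table.  */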
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx last_scheduled_insn;
/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
        base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */
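/* For orientation, a minimal sketch of the component structure the comment
   above describes.  The disp, pointer and literal_pool members appear in
   s390_decompose_address below; the base and indx member names are assumed
   from that function's local variables and may differ from the elided
   declaration:

     struct s390_address
     {
       rtx base;
       rtx indx;
       rtx disp;
       bool pointer;
       bool literal_pool;
     };
*/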
/* Which cpu are we tuning for.  */
enum processor_type s390_tune = PROCESSOR_max;

/* Which instruction set architecture to use.  */
enum processor_type s390_arch;

HOST_WIDE_INT s390_warn_framesize = 0;
HOST_WIDE_INT s390_stack_size = 0;
HOST_WIDE_INT s390_stack_guard = 0;
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Number of first and last gpr where slots in the register
     save area are reserved for.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Number of first and last gpr to be saved, restored.  */
  int first_restore_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 -  8  7  6  5  4  3  2  1  0
     fpr 15 -  8  7  5  3  1  6  4  2  0
     reg 31 - 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;

  /* Number of floating point registers f8-f15 which must be saved.  */

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  bool has_landing_pad_p;
};
/* Few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (BITNUM)))
#define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (BITNUM))))
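/* For example, per the bit layout documented in s390_frame_layout above,
   cfun_set_fpr_bit (0) records that f0 (hard register 16) needs a save
   slot, and cfun_fpr_bit_p (0) tests that same bit again.  */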
/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE) \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))

/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
static enum machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static enum machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static enum machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */

s390_scalar_mode_supported_p (enum machine_mode mode)

  /* In contrast to the default implementation reject TImode constants on 31bit
     TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static enum machine_mode
s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)

  if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
      || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

s390_match_ccmode_set (rtx set, enum machine_mode req_mode)

  enum machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  if (GET_CODE (SET_DEST (set)) != REG
      || !CC_REGNO_P (REGNO (SET_DEST (set))))

  set_mode = GET_MODE (SET_DEST (set));

      if (req_mode != set_mode)

      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
          && req_mode != CCSRmode && req_mode != CCURmode)

      if (req_mode != CCAmode)

  return (GET_MODE (SET_SRC (set)) == set_mode);
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

s390_match_ccmode (rtx insn, enum machine_mode req_mode)

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)

        rtx set = XVECEXP (PATTERN (insn), 0, i);
        if (GET_CODE (set) == SET)
          if (!s390_match_ccmode_set (set, req_mode))
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

s390_tm_ccmode (rtx op1, rtx op2, bool mixed)

  /* ??? Fixme: should work on CONST_DOUBLE as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     if ((a & (16 + 128)) == 16) -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2 */

      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
        return bit0 > bit1 ? CCT1mode : CCT2mode;
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)

      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))

      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
           || GET_CODE (op1) == NEG)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if (GET_CODE (op0) == AND)

          /* Check whether we can potentially do it via TM.  */
          enum machine_mode ccmode;
          ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
          if (ccmode != VOIDmode)

              /* Relax CCTmode to CCZmode to allow fall-back to AND
                 if that turns out to be beneficial.  */
              return ccmode == CCTmode ? CCZmode : ccmode;

      if (register_operand (op0, HImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))

      if (register_operand (op0, QImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 255))

      /* The only overflow condition of NEG and ABS happens when
         -INT_MAX is used as parameter, which stays negative.  So
         we have an overflow from a positive value to a negative.
         Using CCAP mode the resulting cc can be used for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      /* If constants are involved in an add instruction it is possible to use
         the resulting cc for comparisons with zero.  Knowing the sign of the
         constant the overflow behavior gets predictable.  e.g.:
           int a, b; if ((b = a + c) > 0)
         with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))

          if (INTVAL (XEXP((op0), 1)) < 0)

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)

      if (GET_CODE (op0) == PLUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

          if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
              && GET_CODE (op1) != CONST_INT)

      if (GET_CODE (op0) == MINUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

          if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
              && GET_CODE (op1) != CONST_INT)
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))

      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
          && pos >= 0 && pos + len <= modesize
          && modesize <= HOST_BITS_PER_WIDE_INT)

          unsigned HOST_WIDE_INT block;
          block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
          block <<= modesize - pos - len;

          *op0 = gen_rtx_AND (GET_MODE (inner), inner,
                              gen_int_mode (block, GET_MODE (inner)));

  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))

      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
          && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
          && (GET_MODE_SIZE (GET_MODE (inner))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
              & GET_MODE_MASK (GET_MODE (inner))
              & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
        inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))

          int part = s390_single_part (XEXP (*op0, 1),
                                       GET_MODE (inner), QImode, 0);

              mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
              inner = adjust_address_nv (inner, QImode, part);
              *op0 = gen_rtx_AND (QImode, inner, mask);

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
          & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)

      *op0 = gen_lowpart (HImode, *op0);
  /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)

      enum rtx_code new_code = UNKNOWN;
      switch (*code)
        {
        case EQ: new_code = EQ;  break;
        case NE: new_code = NE;  break;
        case LT: new_code = GTU; break;
        case GT: new_code = LTU; break;
        case LE: new_code = GEU; break;
        case GE: new_code = LEU; break;
        }

      if (new_code != UNKNOWN)

          *op0 = XVECEXP (*op0, 0, 0);
  /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)

      enum rtx_code new_code = UNKNOWN;
      switch (*code)
        {
        case EQ: new_code = EQ; break;
        case NE: new_code = NE; break;
        }

      if (new_code != UNKNOWN)

          *op0 = XVECEXP (*op0, 0, 0);
  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)

      if ((*code == EQ && GET_CODE (*op0) == NE)
          || (*code == NE && GET_CODE (*op0) == EQ))

      *op0 = XEXP (*op0, 0);

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))

      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = swap_condition (*code);
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)

  enum machine_mode mode = s390_select_ccmode (code, op0, op1);

  /* Do not output a redundant compare instruction if a compare_and_swap
     pattern already computed the result and the machine modes are compatible.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)

      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)

      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)

  emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
/* Emit a jump instruction to TARGET.  If COND is NULL_RTX, emit an
   unconditional jump, else a conditional jump under condition COND.  */

s390_emit_jump (rtx target, rtx cond)

    target = gen_rtx_LABEL_REF (VOIDmode, target);

    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
  emit_jump_insn (insn);
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

s390_branch_condition_mask (rtx code)

  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx);

  switch (GET_MODE (XEXP (code, 0)))

      switch (GET_CODE (code))
        case NE:        return CC1 | CC2 | CC3;

      switch (GET_CODE (code))
        case NE:        return CC0 | CC2 | CC3;

      switch (GET_CODE (code))
        case NE:        return CC0 | CC1 | CC3;

      switch (GET_CODE (code))
        case NE:        return CC0 | CC1 | CC2;

      switch (GET_CODE (code))
        case EQ:        return CC0 | CC2;
        case NE:        return CC1 | CC3;

      switch (GET_CODE (code))
        case LTU:       return CC2 | CC3;  /* carry */
        case GEU:       return CC0 | CC1;  /* no carry */

      switch (GET_CODE (code))
        case GTU:       return CC0 | CC1;  /* borrow */
        case LEU:       return CC2 | CC3;  /* no borrow */

      switch (GET_CODE (code))
        case EQ:        return CC0 | CC2;
        case NE:        return CC1 | CC3;
        case LTU:       return CC1;
        case GTU:       return CC3;
        case LEU:       return CC1 | CC2;
        case GEU:       return CC2 | CC3;

      switch (GET_CODE (code))
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LTU:       return CC1;
        case GTU:       return CC2;
        case LEU:       return CC0 | CC1;
        case GEU:       return CC0 | CC2;

      switch (GET_CODE (code))
        case EQ:        return CC0;
        case NE:        return CC2 | CC1 | CC3;
        case LTU:       return CC2;
        case GTU:       return CC1;
        case LEU:       return CC0 | CC2;
        case GEU:       return CC0 | CC1;

      switch (GET_CODE (code))
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1 | CC3;
        case GT:        return CC2;
        case LE:        return CC0 | CC1 | CC3;
        case GE:        return CC0 | CC2;

      switch (GET_CODE (code))
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1;
        case GT:        return CC2 | CC3;
        case LE:        return CC0 | CC1;
        case GE:        return CC0 | CC2 | CC3;

      switch (GET_CODE (code))
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1;
        case GT:        return CC2;
        case LE:        return CC0 | CC1;
        case GE:        return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC1 | CC2;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC1 | CC3;
        case UNGT:      return CC2 | CC3;
        case UNLE:      return CC0 | CC1 | CC3;
        case UNGE:      return CC0 | CC2 | CC3;
        case LTGT:      return CC1 | CC2;

      switch (GET_CODE (code))
        case EQ:        return CC0;
        case NE:        return CC2 | CC1 | CC3;
        case LT:        return CC2;
        case GT:        return CC1;
        case LE:        return CC0 | CC2;
        case GE:        return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC2 | CC1;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC2 | CC3;
        case UNGT:      return CC1 | CC3;
        case UNLE:      return CC0 | CC2 | CC3;
        case UNGE:      return CC0 | CC1 | CC3;
        case LTGT:      return CC2 | CC1;
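/* Note that the mask built above is a 4-bit field with CC0 as its most
   significant bit, so a comparison that is true only for condition code 0
   yields CC0 == 8, which s390_branch_condition_mnemonic below maps to the
   "e" (equal) mnemonic through its table.  */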
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

s390_compare_and_branch_condition_mask (rtx code)

  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

s390_branch_condition_mnemonic (rtx code, int inv)

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && XEXP (code, 1) == const0_rtx)
    mask = s390_branch_condition_mask (code);

    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, enum machine_mode mode, int def)

  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;

  for (i = 0; i < max_parts; i++)

        value = (unsigned HOST_WIDE_INT) INTVAL (op);

        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
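/* For example, with OP = (const_int 0x00ff0000), MODE = QImode and DEF = 0
   the loop above walks the constant one byte at a time starting at the
   least significant byte and returns the first byte that differs from DEF,
   here 0xff.  */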
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

s390_single_part (rtx op,
                  enum machine_mode mode,
                  enum machine_mode part_mode,
                  int def)

  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;

  if (GET_CODE (op) != CONST_INT)

  for (i = 0; i < n_parts; i++)

        value = (unsigned HOST_WIDE_INT) INTVAL (op);

        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))

  return part == -1 ? -1 : n_parts - 1 - part;
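/* For example, for a DImode constant whose upper SImode word is the only
   part that differs from DEF, the function returns 0: the loop scans parts
   from the least significant one upwards, while the final expression above
   numbers them from the most significant part downwards.  */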
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in IN.  POS and LENGTH can be used
   to obtain the start position and the length of the bitfield.

   POS gives the position of the first bit of the bitfield counting
   from the lowest order bit starting with zero.  In order to use this
   value for S/390 instructions this has to be converted to "bits big
   endian" style.  */

s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
                           int *pos, int *length)

  unsigned HOST_WIDE_INT mask = 1ULL;
  bool contiguous = false;

  for (i = 0; i < size; mask <<= 1, i++)

  /* Calculate a mask for all bits beyond the contiguous bits.  */
  mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));

  if (tmp_length + tmp_pos - 1 > size)

  *length = tmp_length;
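/* For example, IN = 0x1f8 sets exactly bits 3..8, so with SIZE = 32 this
   returns true with *POS = 3 and *LENGTH = 6, while IN = 0x5 fails because
   its set bits are not contiguous.  */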
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)

  /* Floating point registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst))

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))

      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
   and [MEM2, MEM2 + SIZE] do overlap and false
   otherwise.  */

s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)

  rtx addr1, addr2, addr_delta;
  HOST_WIDE_INT delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* This overlapping check is used by peepholes merging memory block operations.
     Overlapping operations would otherwise be recognized by the S/390 hardware
     and would fall back to a slower implementation.  Allowing overlapping
     operations would lead to slow code but not to wrong code.  Therefore we are
     somewhat optimistic if we cannot prove that the memory blocks are
     not overlapping.
     That's why we return false here although this may accept operations on
     overlapping memory areas.  */
  if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)

  delta = INTVAL (addr_delta);

      || (delta > 0 && delta < size)
      || (delta < 0 && -delta < size))
/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

s390_offset_p (rtx mem1, rtx mem2, rtx delta)

  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
/* Expand logical operator CODE in mode MODE with operands OPERANDS.  */

s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
                              rtx *operands)

  enum machine_mode wmode = mode;
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];

  /* If we cannot handle the operation directly, use a temp register.  */
  if (!s390_logical_operator_ok_p (operands))
    dst = gen_reg_rtx (mode);

  /* QImode and HImode patterns make sense only if we have a destination
     in memory.  Otherwise perform the operation in SImode.  */
  if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)

  /* Widen operands if required.  */

      if (GET_CODE (dst) == SUBREG
          && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)

      else if (REG_P (dst))
        dst = gen_rtx_SUBREG (wmode, dst, 0);

        dst = gen_reg_rtx (wmode);

      if (GET_CODE (src1) == SUBREG
          && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)

      else if (GET_MODE (src1) != VOIDmode)
        src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);

      if (GET_CODE (src2) == SUBREG
          && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)

      else if (GET_MODE (src2) != VOIDmode)
        src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);

  /* Emit the instruction.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], gen_lowpart (mode, dst));

/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR).  */

s390_logical_operator_ok_p (rtx *operands)

  /* If the destination operand is in memory, it needs to coincide
     with one of the source operands.  After reload, it has to be
     the first source operand.  */
  if (GET_CODE (operands[0]) == MEM)
    return rtx_equal_p (operands[0], operands[1])
           || (!reload_completed && rtx_equal_p (operands[0], operands[2]));

/* Narrow logical operation CODE of memory operand MEMOP with immediate
   operand IMMOP to switch from SS to SI type instructions.  */

s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)

  int def = code == AND ? -1 : 0;

  gcc_assert (GET_CODE (*memop) == MEM);
  gcc_assert (!MEM_VOLATILE_P (*memop));

  mask = s390_extract_part (*immop, QImode, def);
  part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
  gcc_assert (part >= 0);

  *memop = adjust_address (*memop, QImode, part);
  *immop = gen_int_mode (mask, QImode);

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
s390_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* Change optimizations to be performed, depending on the
   optimization level.

   LEVEL is the optimization level specified; 2 if `-O2' is
   specified, 1 if `-O' is specified, and 0 if neither is specified.

   SIZE is nonzero if `-Os' is specified and zero otherwise.  */

s390_option_optimization (int level ATTRIBUTE_UNUSED, int size)

  /* ??? There are apparently still problems with -fcaller-saves.  */
  flag_caller_saves = 0;

  /* Use MVCLE instructions to decrease code size if requested.  */

    target_flags |= MASK_MVCLE;

/* Implement TARGET_OPTION_INIT_STRUCT.  */

s390_option_init_struct (struct gcc_options *opts)

  /* By default, always emit DWARF-2 unwind info.  This allows debugging
     without maintaining a stack frame back-chain.  */
  opts->x_flag_asynchronous_unwind_tables = 1;

/* Return true if ARG is the name of a processor.  Set *TYPE and *FLAGS
   to the associated processor_type and processor_flags if so.  */

s390_handle_arch_option (const char *arg,
                         enum processor_type *type,

      const char *const name;             /* processor name or nickname.  */
      const enum processor_type processor;
      const int flags;                    /* From enum processor_flags.  */

  const processor_alias_table[] =
    {
      {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
      {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
      {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
      {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
               | PF_LONG_DISPLACEMENT},
      {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
                 | PF_LONG_DISPLACEMENT | PF_EXTIMM},
      {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
                | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP},
      {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
              | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
      {"z196", PROCESSOR_2817_Z196, PF_IEEE_FLOAT | PF_ZARCH
               | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10 | PF_Z196},
    };

  for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
    if (strcmp (arg, processor_alias_table[i].name) == 0)

        *type = processor_alias_table[i].processor;
        *flags = processor_alias_table[i].flags;

/* Implement TARGET_HANDLE_OPTION.  */

s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)

      return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);

    case OPT_mstack_guard_:
      if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)

      if (exact_log2 (s390_stack_guard) == -1)
        error ("stack guard value must be an exact power of 2");

    case OPT_mstack_size_:
      if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)

      if (exact_log2 (s390_stack_size) == -1)
        error ("stack size must be an exact power of 2");

      return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);

    case OPT_mwarn_framesize_:
      return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;
s390_option_override (void)

  /* Set up function hooks.  */
  init_machine_status = s390_init_machine_status;

  /* Architecture mode defaults according to ABI.  */
  if (!(target_flags_explicit & MASK_ZARCH))

        target_flags |= MASK_ZARCH;

        target_flags &= ~MASK_ZARCH;

  /* Determine processor architectural level.  */
  if (!s390_arch_string)

      s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
      s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);

  /* Determine processor to tune for.  */
  if (s390_tune == PROCESSOR_max)

      s390_tune = s390_arch;
      s390_tune_flags = s390_arch_flags;

  /* Sanity checks.  */
  if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
    error ("z/Architecture mode not supported on %s", s390_arch_string);
  if (TARGET_64BIT && !TARGET_ZARCH)
    error ("64-bit ABI not supported in ESA/390 mode");

  if (TARGET_HARD_DFP && !TARGET_DFP)

      if (target_flags_explicit & MASK_HARD_DFP)

          if (!TARGET_CPU_DFP)
            error ("Hardware decimal floating point instructions"
                   " not available on %s", s390_arch_string);

            error ("Hardware decimal floating point instructions"
                   " not available in ESA/390 mode");

        target_flags &= ~MASK_HARD_DFP;

  if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)

      if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
        error ("-mhard-dfp can't be used in conjunction with -msoft-float");

      target_flags &= ~MASK_HARD_DFP;

  /* Set processor cost function.  */

    case PROCESSOR_2084_Z990:
      s390_cost = &z990_cost;

    case PROCESSOR_2094_Z9_109:
      s390_cost = &z9_109_cost;

    case PROCESSOR_2097_Z10:
      s390_cost = &z10_cost;
    case PROCESSOR_2817_Z196:
      s390_cost = &z196_cost;

      s390_cost = &z900_cost;

  if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
    error ("-mbackchain -mpacked-stack -mhard-float are not supported "

  if (s390_stack_size)

      if (s390_stack_guard >= s390_stack_size)
        error ("stack size must be greater than the stack guard value");
      else if (s390_stack_size > 1 << 16)
        error ("stack size must not be greater than 64k");

  else if (s390_stack_guard)
    error ("-mstack-guard implies use of -mstack-size");

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (s390_tune == PROCESSOR_2097_Z10
      || s390_tune == PROCESSOR_2817_Z196)

      maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
                             global_options.x_param_values,
                             global_options_set.x_param_values);

  maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* values for loop prefetching */
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* s390 has more than 2 levels and the size is much larger.  Since
     we are always running virtualized assume that we only get a small
     part of the caches above l1.  */
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* This cannot reside in s390_option_optimization since HAVE_prefetch
     requires the arch flags to be evaluated already.  Since prefetching
     is beneficial on s390, we enable it if available.  */
  if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
    flag_prefetch_loop_arrays = 1;
/* Map for smallest class containing reg regno.  */

const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
  ACCESS_REGS, ACCESS_REGS
};
/* Return attribute type of insn.  */

static enum attr_type
s390_safe_attr_type (rtx insn)

  if (recog_memoized (insn) >= 0)
    return get_attr_type (insn);
/* Return true if DISP is a valid short displacement.  */

s390_short_displacement (rtx disp)

  /* No displacement is OK.  */

  /* Without the long displacement facility we don't need to
     distinguish between long and short displacement.  */
  if (!TARGET_LONG_DISPLACEMENT)

  /* Integer displacement in range.  */
  if (GET_CODE (disp) == CONST_INT)
    return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;

  /* GOT offset is not OK, the GOT can be large.  */
  if (GET_CODE (disp) == CONST
      && GET_CODE (XEXP (disp, 0)) == UNSPEC
      && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
          || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))

  /* All other symbolic constants are literal pool references,
     which are OK as the literal pool must be small.  */
  if (GET_CODE (disp) == CONST)
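/* Note: a "short" displacement is the unsigned 12-bit field (0..4095)
   checked above; with the long-displacement facility the instructions can
   also encode a wider signed displacement, which is what DISP_IN_RANGE
   checks elsewhere in this file.  */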
/* Decompose a RTL expression ADDR for a memory address into
   its components, returned in OUT.

   Returns false if ADDR is not a valid memory address, true
   otherwise.  If OUT is NULL, don't return the components,
   but check for validity only.

   Note: Only addresses in canonical form are recognized.
   LEGITIMIZE_ADDRESS should convert non-canonical forms to the
   canonical form so that they will be recognized.  */

s390_decompose_address (rtx addr, struct s390_address *out)

  HOST_WIDE_INT offset = 0;
  rtx base = NULL_RTX;
  rtx indx = NULL_RTX;
  rtx disp = NULL_RTX;

  bool pointer = false;
  bool base_ptr = false;
  bool indx_ptr = false;
  bool literal_pool = false;

  /* We may need to substitute the literal pool base register into the address
     below.  However, at this point we do not know which register is going to
     be used as base, so we substitute the arg pointer register.  This is going
     to be treated as holding a pointer below -- it shouldn't be used for any
     other purpose.  */
  rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);

  /* Decompose address into base + index + displacement.  */

  if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)

  else if (GET_CODE (addr) == PLUS)

      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      enum rtx_code code0 = GET_CODE (op0);
      enum rtx_code code1 = GET_CODE (op1);

      if (code0 == REG || code0 == UNSPEC)

          if (code1 == REG || code1 == UNSPEC)

              indx = op0;       /* index + base */

              base = op0;       /* base + displacement */

      else if (code0 == PLUS)

          indx = XEXP (op0, 0); /* index + base + disp */
          base = XEXP (op0, 1);

    disp = addr;                /* displacement */

  /* Extract integer part of displacement.  */

      if (GET_CODE (disp) == CONST_INT)

          offset = INTVAL (disp);

      else if (GET_CODE (disp) == CONST
               && GET_CODE (XEXP (disp, 0)) == PLUS
               && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)

          offset = INTVAL (XEXP (XEXP (disp, 0), 1));
          disp = XEXP (XEXP (disp, 0), 0);

      /* Strip off CONST here to avoid special case tests later.  */
      if (disp && GET_CODE (disp) == CONST)
        disp = XEXP (disp, 0);

  /* We can convert literal pool addresses to
     displacements by basing them off the base register.  */
  if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))

      /* Either base or index must be free to hold the base register.  */

        base = fake_pool_base, literal_pool = true;

        indx = fake_pool_base, literal_pool = true;

      /* Mark up the displacement.  */
      disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
                             UNSPEC_LTREL_OFFSET);

  /* Validate base register.  */

      if (GET_CODE (base) == UNSPEC)
        switch (XINT (base, 1))

            disp = gen_rtx_UNSPEC (Pmode,
                                   gen_rtvec (1, XVECEXP (base, 0, 0)),
                                   UNSPEC_LTREL_OFFSET);

            base = XVECEXP (base, 0, 1);

          case UNSPEC_LTREL_BASE:
            if (XVECLEN (base, 0) == 1)
              base = fake_pool_base, literal_pool = true;

              base = XVECEXP (base, 0, 1);

          || (GET_MODE (base) != SImode
              && GET_MODE (base) != Pmode))

      if (REGNO (base) == STACK_POINTER_REGNUM
          || REGNO (base) == FRAME_POINTER_REGNUM
          || ((reload_completed || reload_in_progress)
              && frame_pointer_needed
              && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
          || REGNO (base) == ARG_POINTER_REGNUM
              && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
        pointer = base_ptr = true;

      if ((reload_completed || reload_in_progress)
          && base == cfun->machine->base_reg)
        pointer = base_ptr = literal_pool = true;

  /* Validate index register.  */

      if (GET_CODE (indx) == UNSPEC)
        switch (XINT (indx, 1))

            disp = gen_rtx_UNSPEC (Pmode,
                                   gen_rtvec (1, XVECEXP (indx, 0, 0)),
                                   UNSPEC_LTREL_OFFSET);

            indx = XVECEXP (indx, 0, 1);

          case UNSPEC_LTREL_BASE:
            if (XVECLEN (indx, 0) == 1)
              indx = fake_pool_base, literal_pool = true;

              indx = XVECEXP (indx, 0, 1);

          || (GET_MODE (indx) != SImode
              && GET_MODE (indx) != Pmode))

      if (REGNO (indx) == STACK_POINTER_REGNUM
          || REGNO (indx) == FRAME_POINTER_REGNUM
          || ((reload_completed || reload_in_progress)
              && frame_pointer_needed
              && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
          || REGNO (indx) == ARG_POINTER_REGNUM
              && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
        pointer = indx_ptr = true;

      if ((reload_completed || reload_in_progress)
          && indx == cfun->machine->base_reg)
        pointer = indx_ptr = literal_pool = true;

  /* Prefer to use pointer as base, not index.  */
  if (base && indx && !base_ptr
      && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))

  /* Validate displacement.  */

      /* If virtual registers are involved, the displacement will change later
         anyway as the virtual registers get eliminated.  This could make a
         valid displacement invalid, but it is more likely to make an invalid
         displacement valid, because we sometimes access the register save area
         via negative offsets to one of those registers.
         Thus we don't check the displacement for validity here.  If after
         elimination the displacement turns out to be invalid after all,
         this is fixed up by reload in any case.  */
      if (base != arg_pointer_rtx
          && indx != arg_pointer_rtx
          && base != return_address_pointer_rtx
          && indx != return_address_pointer_rtx
          && base != frame_pointer_rtx
          && indx != frame_pointer_rtx
          && base != virtual_stack_vars_rtx
          && indx != virtual_stack_vars_rtx)
        if (!DISP_IN_RANGE (offset))

      /* All the special cases are pointers.  */

      /* In the small-PIC case, the linker converts @GOT
         and @GOTNTPOFF offsets to possible displacements.  */
      if (GET_CODE (disp) == UNSPEC
          && (XINT (disp, 1) == UNSPEC_GOT
              || XINT (disp, 1) == UNSPEC_GOTNTPOFF)

      /* Accept pool label offsets.  */
      else if (GET_CODE (disp) == UNSPEC
               && XINT (disp, 1) == UNSPEC_POOL_OFFSET)

      /* Accept literal pool references.  */
      else if (GET_CODE (disp) == UNSPEC
               && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)

          orig_disp = gen_rtx_CONST (Pmode, disp);

              /* If we have an offset, make sure it does not
                 exceed the size of the constant pool entry.  */
              rtx sym = XVECEXP (disp, 0, 0);
              if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))

              orig_disp = plus_constant (orig_disp, offset);

      out->disp = orig_disp;
      out->pointer = pointer;
      out->literal_pool = literal_pool;
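/* For example, the canonical address (plus (plus (reg A) (reg B))
   (const_int 8)) is decomposed above into index A, base B and
   displacement 8.  */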
/* Decompose a RTL expression OP for a shift count into its components,
   and return the base register in BASE and the offset in OFFSET.

   Return true if OP is a valid shift count, false if not.  */

s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)

  HOST_WIDE_INT off = 0;

  /* We can have an integer constant, an address register,
     or a sum of the two.  */
  if (GET_CODE (op) == CONST_INT)

  if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)

      off = INTVAL (XEXP (op, 1));

  while (op && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (op && GET_CODE (op) != REG)
/* Return true if CODE is a valid address without index.  */

s390_legitimate_address_without_index_p (rtx op)

  struct s390_address addr;

  if (!s390_decompose_address (XEXP (op, 0), &addr))
/* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
   and return these parts in SYMREF and ADDEND.  You can pass NULL in
   SYMREF and/or ADDEND if you are not interested in these values.
   Literal pool references are *not* considered symbol references.  */

s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)

  HOST_WIDE_INT tmpaddend = 0;

  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)

      if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
          && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
          && CONST_INT_P (XEXP (addr, 1)))

          tmpaddend = INTVAL (XEXP (addr, 1));
          addr = XEXP (addr, 0);

  if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))

    *addend = tmpaddend;
/* Return true if the address in OP is valid for constraint letter C
   if wrapped in a MEM rtx.  Set LIT_POOL_OK to true if literal
   pool MEMs should be accepted.  Only the Q, R, S, T constraint
   letters are allowed for C.  */

s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)

  struct s390_address addr;
  bool decomposed = false;

  /* This check makes sure that no symbolic address (except literal
     pool references) are accepted by the R or T constraints.  */
  if (s390_symref_operand_p (op, NULL, NULL))

  /* Ensure literal pool references are only accepted if LIT_POOL_OK.  */

      if (!s390_decompose_address (op, &addr))

      if (addr.literal_pool)

    case 'Q': /* no index short displacement */
      if (!decomposed && !s390_decompose_address (op, &addr))

      if (!s390_short_displacement (addr.disp))

    case 'R': /* with index short displacement */
      if (TARGET_LONG_DISPLACEMENT)

          if (!decomposed && !s390_decompose_address (op, &addr))

          if (!s390_short_displacement (addr.disp))

      /* Any invalid address here will be fixed up by reload,
         so accept it for the most generic constraint.  */

    case 'S': /* no index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)

      if (!decomposed && !s390_decompose_address (op, &addr))

      if (s390_short_displacement (addr.disp))

    case 'T': /* with index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)

      /* Any invalid address here will be fixed up by reload,
         so accept it for the most generic constraint.  */
      if ((decomposed || s390_decompose_address (op, &addr))
          && s390_short_displacement (addr.disp))
/* Evaluates constraint strings described by the regular expression
   ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
   the constraint given in STR, or 0 else.  */

s390_mem_constraint (const char *str, rtx op)

      /* Check for offsettable variants of memory constraints.  */
      if (!MEM_P (op) || MEM_VOLATILE_P (op))

      if ((reload_completed || reload_in_progress)
          ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))

      return s390_check_qrst_address (str[1], XEXP (op, 0), true);

      /* Check for non-literal-pool variants of memory constraints.  */

      return s390_check_qrst_address (str[1], XEXP (op, 0), false);

      if (GET_CODE (op) != MEM)

      return s390_check_qrst_address (c, XEXP (op, 0), true);

      return (s390_check_qrst_address ('Q', op, true)
              || s390_check_qrst_address ('R', op, true));

      return (s390_check_qrst_address ('S', op, true)
              || s390_check_qrst_address ('T', op, true));

      /* Simply check for the basic form of a shift count.  Reload will
         take care of making sure we have a proper base register.  */
      if (!s390_decompose_shift_count (op, NULL, NULL))

      return s390_check_qrst_address (str[1], op, true);
/* Evaluates constraint strings starting with letter O.  Input
   parameter C is the second letter following the "O" in the constraint
   string.  Returns 1 if VALUE meets the respective constraint and 0
   otherwise.  */

s390_O_constraint_str (const char c, HOST_WIDE_INT value)

      return trunc_int_for_mode (value, SImode) == value;

             || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;

      return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
/* Evaluates constraint strings starting with letter N.  Parameter STR
   contains the letters following letter "N" in the constraint string.
   Returns true if VALUE matches the constraint.  */

s390_N_constraint_str (const char *str, HOST_WIDE_INT value)

  enum machine_mode mode, part_mode;

  int part, part_goal;

    part_goal = str[0] - '0';

  if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))

  part = s390_single_part (GEN_INT (value), mode, part_mode, def);

  if (part_goal != -1 && part_goal != part)
/* Returns true if the input parameter VALUE is a float zero.  */

static int
s390_float_const_zero_p (rtx value)
{
  return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
	  && value == CONST0_RTX (GET_MODE (value)));
}
2425 /* Compute a (partial) cost for rtx X. Return true if the complete
2426 cost has been computed, and false if subexpressions should be
2427 scanned. In either case, *TOTAL contains the cost result.
2428 CODE contains GET_CODE (x), OUTER_CODE contains the code
2429 of the superexpression of x. */
2432 s390_rtx_costs (rtx x
, int code
, int outer_code
, int *total
,
2433 bool speed ATTRIBUTE_UNUSED
)
2456 *total
= COSTS_N_INSNS (1);
2461 /* Check for multiply and add. */
2462 if ((GET_MODE (x
) == DFmode
|| GET_MODE (x
) == SFmode
)
2463 && GET_CODE (XEXP (x
, 0)) == MULT
2464 && TARGET_HARD_FLOAT
&& TARGET_FUSED_MADD
)
2466 /* This is the multiply and add case. */
2467 if (GET_MODE (x
) == DFmode
)
2468 *total
= s390_cost
->madbr
;
2470 *total
= s390_cost
->maebr
;
2471 *total
+= (rtx_cost (XEXP (XEXP (x
, 0), 0), MULT
, speed
)
2472 + rtx_cost (XEXP (XEXP (x
, 0), 1), MULT
, speed
)
2473 + rtx_cost (XEXP (x
, 1), (enum rtx_code
) code
, speed
));
2474 return true; /* Do not do an additional recursive descent. */
2476 *total
= COSTS_N_INSNS (1);
2480 switch (GET_MODE (x
))
2484 rtx left
= XEXP (x
, 0);
2485 rtx right
= XEXP (x
, 1);
2486 if (GET_CODE (right
) == CONST_INT
2487 && CONST_OK_FOR_K (INTVAL (right
)))
2488 *total
= s390_cost
->mhi
;
2489 else if (GET_CODE (left
) == SIGN_EXTEND
)
2490 *total
= s390_cost
->mh
;
2492 *total
= s390_cost
->ms
; /* msr, ms, msy */
2497 rtx left
= XEXP (x
, 0);
2498 rtx right
= XEXP (x
, 1);
2501 if (GET_CODE (right
) == CONST_INT
2502 && CONST_OK_FOR_K (INTVAL (right
)))
2503 *total
= s390_cost
->mghi
;
2504 else if (GET_CODE (left
) == SIGN_EXTEND
)
2505 *total
= s390_cost
->msgf
;
2507 *total
= s390_cost
->msg
; /* msgr, msg */
2509 else /* TARGET_31BIT */
2511 if (GET_CODE (left
) == SIGN_EXTEND
2512 && GET_CODE (right
) == SIGN_EXTEND
)
2513 /* mulsidi case: mr, m */
2514 *total
= s390_cost
->m
;
2515 else if (GET_CODE (left
) == ZERO_EXTEND
2516 && GET_CODE (right
) == ZERO_EXTEND
2517 && TARGET_CPU_ZARCH
)
2518 /* umulsidi case: ml, mlr */
2519 *total
= s390_cost
->ml
;
2521 /* Complex calculation is required. */
2522 *total
= COSTS_N_INSNS (40);
2528 *total
= s390_cost
->mult_df
;
2531 *total
= s390_cost
->mxbr
;
2540 if (GET_MODE (x
) == TImode
) /* 128 bit division */
2541 *total
= s390_cost
->dlgr
;
2542 else if (GET_MODE (x
) == DImode
)
2544 rtx right
= XEXP (x
, 1);
2545 if (GET_CODE (right
) == ZERO_EXTEND
) /* 64 by 32 bit division */
2546 *total
= s390_cost
->dlr
;
2547 else /* 64 by 64 bit division */
2548 *total
= s390_cost
->dlgr
;
2550 else if (GET_MODE (x
) == SImode
) /* 32 bit division */
2551 *total
= s390_cost
->dlr
;
2556 if (GET_MODE (x
) == DImode
)
2558 rtx right
= XEXP (x
, 1);
2559 if (GET_CODE (right
) == ZERO_EXTEND
) /* 64 by 32 bit division */
2561 *total
= s390_cost
->dsgfr
;
2563 *total
= s390_cost
->dr
;
2564 else /* 64 by 64 bit division */
2565 *total
= s390_cost
->dsgr
;
2567 else if (GET_MODE (x
) == SImode
) /* 32 bit division */
2568 *total
= s390_cost
->dlr
;
2569 else if (GET_MODE (x
) == SFmode
)
2571 *total
= s390_cost
->debr
;
2573 else if (GET_MODE (x
) == DFmode
)
2575 *total
= s390_cost
->ddbr
;
2577 else if (GET_MODE (x
) == TFmode
)
2579 *total
= s390_cost
->dxbr
;
2584 if (GET_MODE (x
) == SFmode
)
2585 *total
= s390_cost
->sqebr
;
2586 else if (GET_MODE (x
) == DFmode
)
2587 *total
= s390_cost
->sqdbr
;
2589 *total
= s390_cost
->sqxbr
;
2594 if (outer_code
== MULT
|| outer_code
== DIV
|| outer_code
== MOD
2595 || outer_code
== PLUS
|| outer_code
== MINUS
2596 || outer_code
== COMPARE
)
2601 *total
= COSTS_N_INSNS (1);
2602 if (GET_CODE (XEXP (x
, 0)) == AND
2603 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2604 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)
2606 rtx op0
= XEXP (XEXP (x
, 0), 0);
2607 rtx op1
= XEXP (XEXP (x
, 0), 1);
2608 rtx op2
= XEXP (x
, 1);
2610 if (memory_operand (op0
, GET_MODE (op0
))
2611 && s390_tm_ccmode (op1
, op2
, 0) != VOIDmode
)
2613 if (register_operand (op0
, GET_MODE (op0
))
2614 && s390_tm_ccmode (op1
, op2
, 1) != VOIDmode
)
/* Return the cost of an address rtx ADDR.  */

static int
s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
{
  struct s390_address ad;

  if (!s390_decompose_address (addr, &ad))
    return 1000;

  return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
}
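/* A worked example of the cost above (illustrative only): a plain
   base + displacement address costs COSTS_N_INSNS (1), while an
   address that also uses an index register, e.g.
     (plus (plus (reg %r2) (reg %r3)) (const_int 8)),
   costs COSTS_N_INSNS (1) + 1, so base-only addresses are slightly
   preferred when both forms would work.  Addresses that cannot be
   decomposed at all are priced prohibitively high.  */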
/* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
   otherwise return 0.  */

int
tls_symbolic_operand (rtx op)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;
  return SYMBOL_REF_TLS_MODEL (op);
}
/* Split DImode access register reference REG (on 64-bit) into its constituent
   low and high parts, and store them into LO and HI.  Note that gen_lowpart/
   gen_highpart cannot be used as they assume all registers are word-sized,
   while our access registers have only half that size.  */

void
s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
{
  gcc_assert (TARGET_64BIT);
  gcc_assert (ACCESS_REG_P (reg));
  gcc_assert (GET_MODE (reg) == DImode);
  gcc_assert (!(REGNO (reg) & 1));

  *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
  *hi = gen_rtx_REG (SImode, REGNO (reg));
}
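/* Illustrative example (not part of the original comments): for a
   DImode value held in the access register pair starting at %a0, the
   split produces
     *hi = (reg:SI %a0)   -- most significant 32 bits
     *lo = (reg:SI %a1)   -- least significant 32 bits
   which matches the even/odd register pair layout asserted above.  */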
/* Return true if OP contains a symbol reference.  */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return true;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}
      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}
/* Return true if OP contains a reference to a thread-local symbol.  */

bool
tls_symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF)
    return tls_symbolic_operand (op);

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}
      else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}
/* Return true if OP is a legitimate general operand when
   generating PIC code.  It is given that flag_pic is on
   and that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_pic_operand_p (rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Reject everything else; must be handled
     via emit_symbolic_move.  */
  return 0;
}
/* Returns true if the constant value OP is a legitimate general operand.
   It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_constant_p (rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Accept immediate LARL operands.  */
  if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
    return 1;

  /* Thread-local symbols are never legal constants.  This is
     so that emit_call knows that computing such addresses
     might require a function call.  */
  if (TLS_SYMBOLIC_CONST (op))
    return 0;

  /* In the PIC case, symbolic constants must *not* be
     forced into the literal pool.  We accept them here,
     so that they will be handled by emit_symbolic_move.  */
  if (flag_pic)
    return 1;

  /* All remaining non-PIC symbolic constants are
     forced into the literal pool.  */
  return 1;
}
2772 /* Determine if it's legal to put X into the constant pool. This
2773 is not possible if X contains the address of a symbol that is
2774 not constant (TLS) or not known at final link time (PIC). */
2777 s390_cannot_force_const_mem (rtx x
)
2779 switch (GET_CODE (x
))
2783 /* Accept all non-symbolic constants. */
2787 /* Labels are OK iff we are non-PIC. */
2788 return flag_pic
!= 0;
2791 /* 'Naked' TLS symbol references are never OK,
2792 non-TLS symbols are OK iff we are non-PIC. */
2793 if (tls_symbolic_operand (x
))
2796 return flag_pic
!= 0;
2799 return s390_cannot_force_const_mem (XEXP (x
, 0));
2802 return s390_cannot_force_const_mem (XEXP (x
, 0))
2803 || s390_cannot_force_const_mem (XEXP (x
, 1));
2806 switch (XINT (x
, 1))
2808 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2809 case UNSPEC_LTREL_OFFSET
:
2817 case UNSPEC_GOTNTPOFF
:
2818 case UNSPEC_INDNTPOFF
:
/* If the literal pool shares the code section, we put
   execute template placeholders into the pool as well.  */
2824 return TARGET_CPU_ZARCH
;
2836 /* Returns true if the constant value OP is a legitimate general
2837 operand during and after reload. The difference to
2838 legitimate_constant_p is that this function will not accept
2839 a constant that would need to be forced to the literal pool
2840 before it can be used as operand.
2841 This function accepts all constants which can be loaded directly
2845 legitimate_reload_constant_p (rtx op
)
2847 /* Accept la(y) operands. */
2848 if (GET_CODE (op
) == CONST_INT
2849 && DISP_IN_RANGE (INTVAL (op
)))
2852 /* Accept l(g)hi/l(g)fi operands. */
2853 if (GET_CODE (op
) == CONST_INT
2854 && (CONST_OK_FOR_K (INTVAL (op
)) || CONST_OK_FOR_Os (INTVAL (op
))))
2857 /* Accept lliXX operands. */
2859 && GET_CODE (op
) == CONST_INT
2860 && trunc_int_for_mode (INTVAL (op
), word_mode
) == INTVAL (op
)
2861 && s390_single_part (op
, word_mode
, HImode
, 0) >= 0)
2865 && GET_CODE (op
) == CONST_INT
2866 && trunc_int_for_mode (INTVAL (op
), word_mode
) == INTVAL (op
)
2867 && s390_single_part (op
, word_mode
, SImode
, 0) >= 0)
2870 /* Accept larl operands. */
2871 if (TARGET_CPU_ZARCH
2872 && larl_operand (op
, VOIDmode
))
2875 /* Accept floating-point zero operands that fit into a single GPR. */
2876 if (GET_CODE (op
) == CONST_DOUBLE
2877 && s390_float_const_zero_p (op
)
2878 && GET_MODE_SIZE (GET_MODE (op
)) <= UNITS_PER_WORD
)
2881 /* Accept double-word operands that can be split. */
2882 if (GET_CODE (op
) == CONST_INT
2883 && trunc_int_for_mode (INTVAL (op
), word_mode
) != INTVAL (op
))
2885 enum machine_mode dword_mode
= word_mode
== SImode
? DImode
: TImode
;
2886 rtx hi
= operand_subword (op
, 0, 0, dword_mode
);
2887 rtx lo
= operand_subword (op
, 1, 0, dword_mode
);
2888 return legitimate_reload_constant_p (hi
)
2889 && legitimate_reload_constant_p (lo
);
2892 /* Everything else cannot be handled without reload. */
2896 /* Returns true if the constant value OP is a legitimate fp operand
2897 during and after reload.
2898 This function accepts all constants which can be loaded directly
2902 legitimate_reload_fp_constant_p (rtx op
)
2904 /* Accept floating-point zero operands if the load zero instruction
2907 && GET_CODE (op
) == CONST_DOUBLE
2908 && s390_float_const_zero_p (op
))
2914 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2915 return the class of reg to actually use. */
2918 s390_preferred_reload_class (rtx op
, enum reg_class rclass
)
2920 switch (GET_CODE (op
))
2922 /* Constants we cannot reload into general registers
2923 must be forced into the literal pool. */
2926 if (reg_class_subset_p (GENERAL_REGS
, rclass
)
2927 && legitimate_reload_constant_p (op
))
2928 return GENERAL_REGS
;
2929 else if (reg_class_subset_p (ADDR_REGS
, rclass
)
2930 && legitimate_reload_constant_p (op
))
2932 else if (reg_class_subset_p (FP_REGS
, rclass
)
2933 && legitimate_reload_fp_constant_p (op
))
2937 /* If a symbolic constant or a PLUS is reloaded,
2938 it is most likely being used as an address, so
2939 prefer ADDR_REGS. If 'class' is not a superset
2940 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2945 if (reg_class_subset_p (ADDR_REGS
, rclass
))
/* Return true if ADDR is SYMBOL_REF + addend with addend being a
   multiple of ALIGNMENT and the SYMBOL_REF being naturally
   aligned.  */

static bool
s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
{
  HOST_WIDE_INT addend;
  rtx symref;

  if (!s390_symref_operand_p (addr, &symref, &addend))
    return false;

  return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
	  && !(addend & (alignment - 1)));
}
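/* Worked example of the addend test (illustrative, assumed values):
   for SYMBOL_REF ("x") + 6 checked against ALIGNMENT == 4,
   6 & (4 - 1) == 2, so the address is rejected; SYMBOL_REF ("x") + 8
   passes since 8 & 3 == 0, provided the symbol itself is naturally
   aligned.  */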
2974 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2975 operand SCRATCH is used to reload the even part of the address and
2979 s390_reload_larl_operand (rtx reg
, rtx addr
, rtx scratch
)
2981 HOST_WIDE_INT addend
;
2984 if (!s390_symref_operand_p (addr
, &symref
, &addend
))
2988 /* Easy case. The addend is even so larl will do fine. */
2989 emit_move_insn (reg
, addr
);
2992 /* We can leave the scratch register untouched if the target
2993 register is a valid base register. */
2994 if (REGNO (reg
) < FIRST_PSEUDO_REGISTER
2995 && REGNO_REG_CLASS (REGNO (reg
)) == ADDR_REGS
)
2998 gcc_assert (REGNO (scratch
) < FIRST_PSEUDO_REGISTER
);
2999 gcc_assert (REGNO_REG_CLASS (REGNO (scratch
)) == ADDR_REGS
);
3002 emit_move_insn (scratch
,
3003 gen_rtx_CONST (Pmode
,
3004 gen_rtx_PLUS (Pmode
, symref
,
3005 GEN_INT (addend
- 1))));
3007 emit_move_insn (scratch
, symref
);
3009 /* Increment the address using la in order to avoid clobbering cc. */
3010 emit_move_insn (reg
, gen_rtx_PLUS (Pmode
, scratch
, const1_rtx
));
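/* Rough sketch of the odd-addend path above (an assumed example, not
   from the original sources): reloading the address SYMBOL + 7 would
   emit roughly

     larl  %r_scratch, SYMBOL+6      ; even addend, valid for larl
     la    %r_target, 1(%r_scratch)  ; add the remaining 1 without
                                     ; clobbering the condition code

   while an even addend such as SYMBOL + 8 is handled by a single larl
   into the target register.  */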
3014 /* Generate what is necessary to move between REG and MEM using
3015 SCRATCH. The direction is given by TOMEM. */
3018 s390_reload_symref_address (rtx reg
, rtx mem
, rtx scratch
, bool tomem
)
3020 /* Reload might have pulled a constant out of the literal pool.
3021 Force it back in. */
3022 if (CONST_INT_P (mem
) || GET_CODE (mem
) == CONST_DOUBLE
3023 || GET_CODE (mem
) == CONST
)
3024 mem
= force_const_mem (GET_MODE (reg
), mem
);
3026 gcc_assert (MEM_P (mem
));
3028 /* For a load from memory we can leave the scratch register
3029 untouched if the target register is a valid base register. */
3031 && REGNO (reg
) < FIRST_PSEUDO_REGISTER
3032 && REGNO_REG_CLASS (REGNO (reg
)) == ADDR_REGS
3033 && GET_MODE (reg
) == GET_MODE (scratch
))
3036 /* Load address into scratch register. Since we can't have a
3037 secondary reload for a secondary reload we have to cover the case
3038 where larl would need a secondary reload here as well. */
3039 s390_reload_larl_operand (scratch
, XEXP (mem
, 0), scratch
);
3041 /* Now we can use a standard load/store to do the move. */
3043 emit_move_insn (replace_equiv_address (mem
, scratch
), reg
);
3045 emit_move_insn (reg
, replace_equiv_address (mem
, scratch
));
3048 /* Inform reload about cases where moving X with a mode MODE to a register in
3049 RCLASS requires an extra scratch or immediate register. Return the class
3050 needed for the immediate register. */
3053 s390_secondary_reload (bool in_p
, rtx x
, reg_class_t rclass_i
,
3054 enum machine_mode mode
, secondary_reload_info
*sri
)
3056 enum reg_class rclass
= (enum reg_class
) rclass_i
;
3058 /* Intermediate register needed. */
3059 if (reg_classes_intersect_p (CC_REGS
, rclass
))
3060 return GENERAL_REGS
;
3064 /* On z10 several optimizer steps may generate larl operands with
3067 && s390_symref_operand_p (x
, NULL
, NULL
)
3069 && !s390_check_symref_alignment (x
, 2))
3070 sri
->icode
= ((mode
== DImode
) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3071 : CODE_FOR_reloadsi_larl_odd_addend_z10
);
3073 /* On z10 we need a scratch register when moving QI, TI or floating
3074 point mode values from or to a memory location with a SYMBOL_REF
3075 or if the symref addend of a SI or DI move is not aligned to the
3076 width of the access. */
3078 && s390_symref_operand_p (XEXP (x
, 0), NULL
, NULL
)
3079 && (mode
== QImode
|| mode
== TImode
|| FLOAT_MODE_P (mode
)
3080 || (!TARGET_ZARCH
&& mode
== DImode
)
3081 || ((mode
== HImode
|| mode
== SImode
|| mode
== DImode
)
3082 && (!s390_check_symref_alignment (XEXP (x
, 0),
3083 GET_MODE_SIZE (mode
))))))
3085 #define __SECONDARY_RELOAD_CASE(M,m) \
3088 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3089 CODE_FOR_reload##m##di_tomem_z10; \
3091 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3092 CODE_FOR_reload##m##si_tomem_z10; \
3095 switch (GET_MODE (x
))
3097 __SECONDARY_RELOAD_CASE (QI
, qi
);
3098 __SECONDARY_RELOAD_CASE (HI
, hi
);
3099 __SECONDARY_RELOAD_CASE (SI
, si
);
3100 __SECONDARY_RELOAD_CASE (DI
, di
);
3101 __SECONDARY_RELOAD_CASE (TI
, ti
);
3102 __SECONDARY_RELOAD_CASE (SF
, sf
);
3103 __SECONDARY_RELOAD_CASE (DF
, df
);
3104 __SECONDARY_RELOAD_CASE (TF
, tf
);
3105 __SECONDARY_RELOAD_CASE (SD
, sd
);
3106 __SECONDARY_RELOAD_CASE (DD
, dd
);
3107 __SECONDARY_RELOAD_CASE (TD
, td
);
3112 #undef __SECONDARY_RELOAD_CASE
3116 /* We need a scratch register when loading a PLUS expression which
3117 is not a legitimate operand of the LOAD ADDRESS instruction. */
3118 if (in_p
&& s390_plus_operand (x
, mode
))
3119 sri
->icode
= (TARGET_64BIT
?
3120 CODE_FOR_reloaddi_plus
: CODE_FOR_reloadsi_plus
);
3122 /* Performing a multiword move from or to memory we have to make sure the
3123 second chunk in memory is addressable without causing a displacement
3124 overflow. If that would be the case we calculate the address in
3125 a scratch register. */
3127 && GET_CODE (XEXP (x
, 0)) == PLUS
3128 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
3129 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x
, 0), 1))
3130 + GET_MODE_SIZE (mode
) - 1))
3132 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3133 in a s_operand address since we may fallback to lm/stm. So we only
3134 have to care about overflows in the b+i+d case. */
3135 if ((reg_classes_intersect_p (GENERAL_REGS
, rclass
)
3136 && s390_class_max_nregs (GENERAL_REGS
, mode
) > 1
3137 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
3138 /* For FP_REGS no lm/stm is available so this check is triggered
3139 for displacement overflows in b+i+d and b+d like addresses. */
3140 || (reg_classes_intersect_p (FP_REGS
, rclass
)
3141 && s390_class_max_nregs (FP_REGS
, mode
) > 1))
3144 sri
->icode
= (TARGET_64BIT
?
3145 CODE_FOR_reloaddi_nonoffmem_in
:
3146 CODE_FOR_reloadsi_nonoffmem_in
);
3148 sri
->icode
= (TARGET_64BIT
?
3149 CODE_FOR_reloaddi_nonoffmem_out
:
3150 CODE_FOR_reloadsi_nonoffmem_out
);
3154 /* A scratch address register is needed when a symbolic constant is
3155 copied to r0 compiling with -fPIC. In other cases the target
3156 register might be used as temporary (see legitimize_pic_address). */
3157 if (in_p
&& SYMBOLIC_CONST (x
) && flag_pic
== 2 && rclass
!= ADDR_REGS
)
3158 sri
->icode
= (TARGET_64BIT
?
3159 CODE_FOR_reloaddi_PIC_addr
:
3160 CODE_FOR_reloadsi_PIC_addr
);
3162 /* Either scratch or no register needed. */
3166 /* Generate code to load SRC, which is PLUS that is not a
3167 legitimate operand for the LA instruction, into TARGET.
3168 SCRATCH may be used as scratch register. */
3171 s390_expand_plus_operand (rtx target
, rtx src
,
3175 struct s390_address ad
;
3177 /* src must be a PLUS; get its two operands. */
3178 gcc_assert (GET_CODE (src
) == PLUS
);
3179 gcc_assert (GET_MODE (src
) == Pmode
);
3181 /* Check if any of the two operands is already scheduled
3182 for replacement by reload. This can happen e.g. when
3183 float registers occur in an address. */
3184 sum1
= find_replacement (&XEXP (src
, 0));
3185 sum2
= find_replacement (&XEXP (src
, 1));
3186 src
= gen_rtx_PLUS (Pmode
, sum1
, sum2
);
3188 /* If the address is already strictly valid, there's nothing to do. */
3189 if (!s390_decompose_address (src
, &ad
)
3190 || (ad
.base
&& !REGNO_OK_FOR_BASE_P (REGNO (ad
.base
)))
3191 || (ad
.indx
&& !REGNO_OK_FOR_INDEX_P (REGNO (ad
.indx
))))
3193 /* Otherwise, one of the operands cannot be an address register;
3194 we reload its value into the scratch register. */
3195 if (true_regnum (sum1
) < 1 || true_regnum (sum1
) > 15)
3197 emit_move_insn (scratch
, sum1
);
3200 if (true_regnum (sum2
) < 1 || true_regnum (sum2
) > 15)
3202 emit_move_insn (scratch
, sum2
);
3206 /* According to the way these invalid addresses are generated
3207 in reload.c, it should never happen (at least on s390) that
3208 *neither* of the PLUS components, after find_replacements
3209 was applied, is an address register. */
3210 if (sum1
== scratch
&& sum2
== scratch
)
3216 src
= gen_rtx_PLUS (Pmode
, sum1
, sum2
);
3219 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3220 is only ever performed on addresses, so we can mark the
3221 sum as legitimate for LA in any case. */
3222 s390_load_address (target
, src
);
3226 /* Return true if ADDR is a valid memory address.
3227 STRICT specifies whether strict register checking applies. */
3230 s390_legitimate_address_p (enum machine_mode mode
, rtx addr
, bool strict
)
3232 struct s390_address ad
;
3235 && larl_operand (addr
, VOIDmode
)
3236 && (mode
== VOIDmode
3237 || s390_check_symref_alignment (addr
, GET_MODE_SIZE (mode
))))
3240 if (!s390_decompose_address (addr
, &ad
))
3245 if (ad
.base
&& !REGNO_OK_FOR_BASE_P (REGNO (ad
.base
)))
3248 if (ad
.indx
&& !REGNO_OK_FOR_INDEX_P (REGNO (ad
.indx
)))
3254 && !(REGNO (ad
.base
) >= FIRST_PSEUDO_REGISTER
3255 || REGNO_REG_CLASS (REGNO (ad
.base
)) == ADDR_REGS
))
3259 && !(REGNO (ad
.indx
) >= FIRST_PSEUDO_REGISTER
3260 || REGNO_REG_CLASS (REGNO (ad
.indx
)) == ADDR_REGS
))
/* Return true if OP is a valid operand for the LA instruction.
   In 31-bit, we need to prove that the result is used as an
   address, as LA performs only a 31-bit addition.  */

bool
legitimate_la_operand_p (rtx op)
{
  struct s390_address addr;

  if (!s390_decompose_address (op, &addr))
    return false;

  return (TARGET_64BIT || addr.pointer);
}
/* Return true if it is valid *and* preferable to use LA to
   compute the sum of OP1 and OP2.  */

bool
preferred_la_operand_p (rtx op1, rtx op2)
{
  struct s390_address addr;

  if (op2 != const0_rtx)
    op1 = gen_rtx_PLUS (Pmode, op1, op2);

  if (!s390_decompose_address (op1, &addr))
    return false;
  if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
    return false;
  if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
    return false;

  /* Avoid LA instructions with index register on z196; it is
     preferable to use regular add instructions when possible.  */
  if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
    return false;

  if (!TARGET_64BIT && !addr.pointer)
    return false;

  if (addr.pointer)
    return true;

  if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
      || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
    return true;

  return false;
}
/* Emit a forced load-address operation to load SRC into DST.
   This will use the LOAD ADDRESS instruction even in situations
   where legitimate_la_operand_p (SRC) returns false.  */

void
s390_load_address (rtx dst, rtx src)
{
  if (TARGET_64BIT)
    emit_move_insn (dst, src);
  else
    emit_insn (gen_force_la_31 (dst, src));
}
3329 /* Return a legitimate reference for ORIG (an address) using the
3330 register REG. If REG is 0, a new pseudo is generated.
3332 There are two types of references that must be handled:
3334 1. Global data references must load the address from the GOT, via
3335 the PIC reg. An insn is emitted to do this load, and the reg is
3338 2. Static data references, constant pool addresses, and code labels
3339 compute the address as an offset from the GOT, whose base is in
3340 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3341 differentiate them from global data objects. The returned
3342 address is the PIC reg + an unspec constant.
3344 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3345 reg also appears in the address. */
3348 legitimize_pic_address (rtx orig
, rtx reg
)
3354 gcc_assert (!TLS_SYMBOLIC_CONST (addr
));
3356 if (GET_CODE (addr
) == LABEL_REF
3357 || (GET_CODE (addr
) == SYMBOL_REF
&& SYMBOL_REF_LOCAL_P (addr
)))
3359 /* This is a local symbol. */
3360 if (TARGET_CPU_ZARCH
&& larl_operand (addr
, VOIDmode
))
3362 /* Access local symbols PC-relative via LARL.
3363 This is the same as in the non-PIC case, so it is
3364 handled automatically ... */
3368 /* Access local symbols relative to the GOT. */
3370 rtx temp
= reg
? reg
: gen_reg_rtx (Pmode
);
3372 if (reload_in_progress
|| reload_completed
)
3373 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM
, true);
3375 addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOTOFF
);
3376 addr
= gen_rtx_CONST (Pmode
, addr
);
3377 addr
= force_const_mem (Pmode
, addr
);
3378 emit_move_insn (temp
, addr
);
3380 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, temp
);
3383 s390_load_address (reg
, new_rtx
);
3388 else if (GET_CODE (addr
) == SYMBOL_REF
)
3391 reg
= gen_reg_rtx (Pmode
);
3395 /* Assume GOT offset < 4k. This is handled the same way
3396 in both 31- and 64-bit code (@GOT). */
3398 if (reload_in_progress
|| reload_completed
)
3399 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM
, true);
3401 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOT
);
3402 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3403 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, new_rtx
);
3404 new_rtx
= gen_const_mem (Pmode
, new_rtx
);
3405 emit_move_insn (reg
, new_rtx
);
3408 else if (TARGET_CPU_ZARCH
)
3410 /* If the GOT offset might be >= 4k, we determine the position
3411 of the GOT entry via a PC-relative LARL (@GOTENT). */
3413 rtx temp
= reg
? reg
: gen_reg_rtx (Pmode
);
3415 gcc_assert (REGNO (temp
) >= FIRST_PSEUDO_REGISTER
3416 || REGNO_REG_CLASS (REGNO (temp
)) == ADDR_REGS
);
3418 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOTENT
);
3419 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3420 emit_move_insn (temp
, new_rtx
);
3422 new_rtx
= gen_const_mem (Pmode
, temp
);
3423 emit_move_insn (reg
, new_rtx
);
3428 /* If the GOT offset might be >= 4k, we have to load it
3429 from the literal pool (@GOT). */
3431 rtx temp
= reg
? reg
: gen_reg_rtx (Pmode
);
3433 gcc_assert (REGNO (temp
) >= FIRST_PSEUDO_REGISTER
3434 || REGNO_REG_CLASS (REGNO (temp
)) == ADDR_REGS
);
3436 if (reload_in_progress
|| reload_completed
)
3437 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM
, true);
3439 addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOT
);
3440 addr
= gen_rtx_CONST (Pmode
, addr
);
3441 addr
= force_const_mem (Pmode
, addr
);
3442 emit_move_insn (temp
, addr
);
3444 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, temp
);
3445 new_rtx
= gen_const_mem (Pmode
, new_rtx
);
3446 emit_move_insn (reg
, new_rtx
);
3452 if (GET_CODE (addr
) == CONST
)
3454 addr
= XEXP (addr
, 0);
3455 if (GET_CODE (addr
) == UNSPEC
)
3457 gcc_assert (XVECLEN (addr
, 0) == 1);
3458 switch (XINT (addr
, 1))
3460 /* If someone moved a GOT-relative UNSPEC
3461 out of the literal pool, force them back in. */
3464 new_rtx
= force_const_mem (Pmode
, orig
);
3467 /* @GOT is OK as is if small. */
3470 new_rtx
= force_const_mem (Pmode
, orig
);
3473 /* @GOTENT is OK as is. */
3477 /* @PLT is OK as is on 64-bit, must be converted to
3478 GOT-relative @PLTOFF on 31-bit. */
3480 if (!TARGET_CPU_ZARCH
)
3482 rtx temp
= reg
? reg
: gen_reg_rtx (Pmode
);
3484 if (reload_in_progress
|| reload_completed
)
3485 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM
, true);
3487 addr
= XVECEXP (addr
, 0, 0);
3488 addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
),
3490 addr
= gen_rtx_CONST (Pmode
, addr
);
3491 addr
= force_const_mem (Pmode
, addr
);
3492 emit_move_insn (temp
, addr
);
3494 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, temp
);
3497 s390_load_address (reg
, new_rtx
);
3503 /* Everything else cannot happen. */
3509 gcc_assert (GET_CODE (addr
) == PLUS
);
3511 if (GET_CODE (addr
) == PLUS
)
3513 rtx op0
= XEXP (addr
, 0), op1
= XEXP (addr
, 1);
3515 gcc_assert (!TLS_SYMBOLIC_CONST (op0
));
3516 gcc_assert (!TLS_SYMBOLIC_CONST (op1
));
3518 /* Check first to see if this is a constant offset
3519 from a local symbol reference. */
3520 if ((GET_CODE (op0
) == LABEL_REF
3521 || (GET_CODE (op0
) == SYMBOL_REF
&& SYMBOL_REF_LOCAL_P (op0
)))
3522 && GET_CODE (op1
) == CONST_INT
)
3524 if (TARGET_CPU_ZARCH
3525 && larl_operand (op0
, VOIDmode
)
3526 && INTVAL (op1
) < (HOST_WIDE_INT
)1 << 31
3527 && INTVAL (op1
) >= -((HOST_WIDE_INT
)1 << 31))
3529 if (INTVAL (op1
) & 1)
3531 /* LARL can't handle odd offsets, so emit a
3532 pair of LARL and LA. */
3533 rtx temp
= reg
? reg
: gen_reg_rtx (Pmode
);
3535 if (!DISP_IN_RANGE (INTVAL (op1
)))
3537 HOST_WIDE_INT even
= INTVAL (op1
) - 1;
3538 op0
= gen_rtx_PLUS (Pmode
, op0
, GEN_INT (even
));
3539 op0
= gen_rtx_CONST (Pmode
, op0
);
3543 emit_move_insn (temp
, op0
);
3544 new_rtx
= gen_rtx_PLUS (Pmode
, temp
, op1
);
3548 s390_load_address (reg
, new_rtx
);
3554 /* If the offset is even, we can just use LARL.
3555 This will happen automatically. */
3560 /* Access local symbols relative to the GOT. */
3562 rtx temp
= reg
? reg
: gen_reg_rtx (Pmode
);
3564 if (reload_in_progress
|| reload_completed
)
3565 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM
, true);
3567 addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, op0
),
3569 addr
= gen_rtx_PLUS (Pmode
, addr
, op1
);
3570 addr
= gen_rtx_CONST (Pmode
, addr
);
3571 addr
= force_const_mem (Pmode
, addr
);
3572 emit_move_insn (temp
, addr
);
3574 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, temp
);
3577 s390_load_address (reg
, new_rtx
);
3583 /* Now, check whether it is a GOT relative symbol plus offset
3584 that was pulled out of the literal pool. Force it back in. */
3586 else if (GET_CODE (op0
) == UNSPEC
3587 && GET_CODE (op1
) == CONST_INT
3588 && XINT (op0
, 1) == UNSPEC_GOTOFF
)
3590 gcc_assert (XVECLEN (op0
, 0) == 1);
3592 new_rtx
= force_const_mem (Pmode
, orig
);
3595 /* Otherwise, compute the sum. */
3598 base
= legitimize_pic_address (XEXP (addr
, 0), reg
);
3599 new_rtx
= legitimize_pic_address (XEXP (addr
, 1),
3600 base
== reg
? NULL_RTX
: reg
);
3601 if (GET_CODE (new_rtx
) == CONST_INT
)
3602 new_rtx
= plus_constant (base
, INTVAL (new_rtx
));
3605 if (GET_CODE (new_rtx
) == PLUS
&& CONSTANT_P (XEXP (new_rtx
, 1)))
3607 base
= gen_rtx_PLUS (Pmode
, base
, XEXP (new_rtx
, 0));
3608 new_rtx
= XEXP (new_rtx
, 1);
3610 new_rtx
= gen_rtx_PLUS (Pmode
, base
, new_rtx
);
3613 if (GET_CODE (new_rtx
) == CONST
)
3614 new_rtx
= XEXP (new_rtx
, 0);
3615 new_rtx
= force_operand (new_rtx
, 0);
/* Load the thread pointer into a register.  */

rtx
s390_get_thread_pointer (void)
{
  rtx tp = gen_reg_rtx (Pmode);

  emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
  mark_reg_pointer (tp, BITS_PER_WORD);

  return tp;
}
3635 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3636 in s390_tls_symbol which always refers to __tls_get_offset.
3637 The returned offset is written to RESULT_REG and an USE rtx is
3638 generated for TLS_CALL. */
3640 static GTY(()) rtx s390_tls_symbol
;
3643 s390_emit_tls_call_insn (rtx result_reg
, rtx tls_call
)
3647 gcc_assert (flag_pic
);
3649 if (!s390_tls_symbol
)
3650 s390_tls_symbol
= gen_rtx_SYMBOL_REF (Pmode
, "__tls_get_offset");
3652 insn
= s390_emit_call (s390_tls_symbol
, tls_call
, result_reg
,
3653 gen_rtx_REG (Pmode
, RETURN_REGNUM
));
3655 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), result_reg
);
3656 RTL_CONST_CALL_P (insn
) = 1;
3659 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3660 this (thread-local) address. REG may be used as temporary. */
3663 legitimize_tls_address (rtx addr
, rtx reg
)
3665 rtx new_rtx
, tls_call
, temp
, base
, r2
, insn
;
3667 if (GET_CODE (addr
) == SYMBOL_REF
)
3668 switch (tls_symbolic_operand (addr
))
3670 case TLS_MODEL_GLOBAL_DYNAMIC
:
3672 r2
= gen_rtx_REG (Pmode
, 2);
3673 tls_call
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_TLSGD
);
3674 new_rtx
= gen_rtx_CONST (Pmode
, tls_call
);
3675 new_rtx
= force_const_mem (Pmode
, new_rtx
);
3676 emit_move_insn (r2
, new_rtx
);
3677 s390_emit_tls_call_insn (r2
, tls_call
);
3678 insn
= get_insns ();
3681 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_NTPOFF
);
3682 temp
= gen_reg_rtx (Pmode
);
3683 emit_libcall_block (insn
, temp
, r2
, new_rtx
);
3685 new_rtx
= gen_rtx_PLUS (Pmode
, s390_get_thread_pointer (), temp
);
3688 s390_load_address (reg
, new_rtx
);
3693 case TLS_MODEL_LOCAL_DYNAMIC
:
3695 r2
= gen_rtx_REG (Pmode
, 2);
3696 tls_call
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
), UNSPEC_TLSLDM
);
3697 new_rtx
= gen_rtx_CONST (Pmode
, tls_call
);
3698 new_rtx
= force_const_mem (Pmode
, new_rtx
);
3699 emit_move_insn (r2
, new_rtx
);
3700 s390_emit_tls_call_insn (r2
, tls_call
);
3701 insn
= get_insns ();
3704 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
), UNSPEC_TLSLDM_NTPOFF
);
3705 temp
= gen_reg_rtx (Pmode
);
3706 emit_libcall_block (insn
, temp
, r2
, new_rtx
);
3708 new_rtx
= gen_rtx_PLUS (Pmode
, s390_get_thread_pointer (), temp
);
3709 base
= gen_reg_rtx (Pmode
);
3710 s390_load_address (base
, new_rtx
);
3712 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_DTPOFF
);
3713 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3714 new_rtx
= force_const_mem (Pmode
, new_rtx
);
3715 temp
= gen_reg_rtx (Pmode
);
3716 emit_move_insn (temp
, new_rtx
);
3718 new_rtx
= gen_rtx_PLUS (Pmode
, base
, temp
);
3721 s390_load_address (reg
, new_rtx
);
3726 case TLS_MODEL_INITIAL_EXEC
:
3729 /* Assume GOT offset < 4k. This is handled the same way
3730 in both 31- and 64-bit code. */
3732 if (reload_in_progress
|| reload_completed
)
3733 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM
, true);
3735 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOTNTPOFF
);
3736 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3737 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, new_rtx
);
3738 new_rtx
= gen_const_mem (Pmode
, new_rtx
);
3739 temp
= gen_reg_rtx (Pmode
);
3740 emit_move_insn (temp
, new_rtx
);
3742 else if (TARGET_CPU_ZARCH
)
3744 /* If the GOT offset might be >= 4k, we determine the position
3745 of the GOT entry via a PC-relative LARL. */
3747 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_INDNTPOFF
);
3748 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3749 temp
= gen_reg_rtx (Pmode
);
3750 emit_move_insn (temp
, new_rtx
);
3752 new_rtx
= gen_const_mem (Pmode
, temp
);
3753 temp
= gen_reg_rtx (Pmode
);
3754 emit_move_insn (temp
, new_rtx
);
3758 /* If the GOT offset might be >= 4k, we have to load it
3759 from the literal pool. */
3761 if (reload_in_progress
|| reload_completed
)
3762 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM
, true);
3764 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOTNTPOFF
);
3765 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3766 new_rtx
= force_const_mem (Pmode
, new_rtx
);
3767 temp
= gen_reg_rtx (Pmode
);
3768 emit_move_insn (temp
, new_rtx
);
3770 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, temp
);
3771 new_rtx
= gen_const_mem (Pmode
, new_rtx
);
3773 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, new_rtx
, addr
), UNSPEC_TLS_LOAD
);
3774 temp
= gen_reg_rtx (Pmode
);
3775 emit_insn (gen_rtx_SET (Pmode
, temp
, new_rtx
));
3779 /* In position-dependent code, load the absolute address of
3780 the GOT entry from the literal pool. */
3782 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_INDNTPOFF
);
3783 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3784 new_rtx
= force_const_mem (Pmode
, new_rtx
);
3785 temp
= gen_reg_rtx (Pmode
);
3786 emit_move_insn (temp
, new_rtx
);
3789 new_rtx
= gen_const_mem (Pmode
, new_rtx
);
3790 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, new_rtx
, addr
), UNSPEC_TLS_LOAD
);
3791 temp
= gen_reg_rtx (Pmode
);
3792 emit_insn (gen_rtx_SET (Pmode
, temp
, new_rtx
));
3795 new_rtx
= gen_rtx_PLUS (Pmode
, s390_get_thread_pointer (), temp
);
3798 s390_load_address (reg
, new_rtx
);
3803 case TLS_MODEL_LOCAL_EXEC
:
3804 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_NTPOFF
);
3805 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3806 new_rtx
= force_const_mem (Pmode
, new_rtx
);
3807 temp
= gen_reg_rtx (Pmode
);
3808 emit_move_insn (temp
, new_rtx
);
3810 new_rtx
= gen_rtx_PLUS (Pmode
, s390_get_thread_pointer (), temp
);
3813 s390_load_address (reg
, new_rtx
);
3822 else if (GET_CODE (addr
) == CONST
&& GET_CODE (XEXP (addr
, 0)) == UNSPEC
)
3824 switch (XINT (XEXP (addr
, 0), 1))
3826 case UNSPEC_INDNTPOFF
:
3827 gcc_assert (TARGET_CPU_ZARCH
);
3836 else if (GET_CODE (addr
) == CONST
&& GET_CODE (XEXP (addr
, 0)) == PLUS
3837 && GET_CODE (XEXP (XEXP (addr
, 0), 1)) == CONST_INT
)
3839 new_rtx
= XEXP (XEXP (addr
, 0), 0);
3840 if (GET_CODE (new_rtx
) != SYMBOL_REF
)
3841 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
3843 new_rtx
= legitimize_tls_address (new_rtx
, reg
);
3844 new_rtx
= plus_constant (new_rtx
, INTVAL (XEXP (XEXP (addr
, 0), 1)));
3845 new_rtx
= force_operand (new_rtx
, 0);
3849 gcc_unreachable (); /* for now ... */
3854 /* Emit insns making the address in operands[1] valid for a standard
3855 move to operands[0]. operands[1] is replaced by an address which
3856 should be used instead of the former RTX to emit the move
3860 emit_symbolic_move (rtx
*operands
)
3862 rtx temp
= !can_create_pseudo_p () ? operands
[0] : gen_reg_rtx (Pmode
);
3864 if (GET_CODE (operands
[0]) == MEM
)
3865 operands
[1] = force_reg (Pmode
, operands
[1]);
3866 else if (TLS_SYMBOLIC_CONST (operands
[1]))
3867 operands
[1] = legitimize_tls_address (operands
[1], temp
);
3869 operands
[1] = legitimize_pic_address (operands
[1], temp
);
3872 /* Try machine-dependent ways of modifying an illegitimate address X
3873 to be legitimate. If we find one, return the new, valid address.
3875 OLDX is the address as it was before break_out_memory_refs was called.
3876 In some cases it is useful to look at this to decide what needs to be done.
3878 MODE is the mode of the operand pointed to by X.
3880 When -fpic is used, special handling is needed for symbolic references.
3881 See comments by legitimize_pic_address for details. */
3884 s390_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
3885 enum machine_mode mode ATTRIBUTE_UNUSED
)
3887 rtx constant_term
= const0_rtx
;
3889 if (TLS_SYMBOLIC_CONST (x
))
3891 x
= legitimize_tls_address (x
, 0);
3893 if (s390_legitimate_address_p (mode
, x
, FALSE
))
3896 else if (GET_CODE (x
) == PLUS
3897 && (TLS_SYMBOLIC_CONST (XEXP (x
, 0))
3898 || TLS_SYMBOLIC_CONST (XEXP (x
, 1))))
3904 if (SYMBOLIC_CONST (x
)
3905 || (GET_CODE (x
) == PLUS
3906 && (SYMBOLIC_CONST (XEXP (x
, 0))
3907 || SYMBOLIC_CONST (XEXP (x
, 1)))))
3908 x
= legitimize_pic_address (x
, 0);
3910 if (s390_legitimate_address_p (mode
, x
, FALSE
))
3914 x
= eliminate_constant_term (x
, &constant_term
);
3916 /* Optimize loading of large displacements by splitting them
3917 into the multiple of 4K and the rest; this allows the
3918 former to be CSE'd if possible.
3920 Don't do this if the displacement is added to a register
3921 pointing into the stack frame, as the offsets will
3922 change later anyway. */
3924 if (GET_CODE (constant_term
) == CONST_INT
3925 && !TARGET_LONG_DISPLACEMENT
3926 && !DISP_IN_RANGE (INTVAL (constant_term
))
3927 && !(REG_P (x
) && REGNO_PTR_FRAME_P (REGNO (x
))))
3929 HOST_WIDE_INT lower
= INTVAL (constant_term
) & 0xfff;
3930 HOST_WIDE_INT upper
= INTVAL (constant_term
) ^ lower
;
3932 rtx temp
= gen_reg_rtx (Pmode
);
3933 rtx val
= force_operand (GEN_INT (upper
), temp
);
3935 emit_move_insn (temp
, val
);
3937 x
= gen_rtx_PLUS (Pmode
, x
, temp
);
3938 constant_term
= GEN_INT (lower
);
3941 if (GET_CODE (x
) == PLUS
)
3943 if (GET_CODE (XEXP (x
, 0)) == REG
)
3945 rtx temp
= gen_reg_rtx (Pmode
);
3946 rtx val
= force_operand (XEXP (x
, 1), temp
);
3948 emit_move_insn (temp
, val
);
3950 x
= gen_rtx_PLUS (Pmode
, XEXP (x
, 0), temp
);
3953 else if (GET_CODE (XEXP (x
, 1)) == REG
)
3955 rtx temp
= gen_reg_rtx (Pmode
);
3956 rtx val
= force_operand (XEXP (x
, 0), temp
);
3958 emit_move_insn (temp
, val
);
3960 x
= gen_rtx_PLUS (Pmode
, temp
, XEXP (x
, 1));
3964 if (constant_term
!= const0_rtx
)
3965 x
= gen_rtx_PLUS (Pmode
, x
, constant_term
);
/* Try a machine-dependent way of reloading an illegitimate address AD
   operand.  If we find one, push the reload and return the new address.

   MODE is the mode of the enclosing MEM.  OPNUM is the operand number
   and TYPE is the reload type of the current reload.  */
3977 legitimize_reload_address (rtx ad
, enum machine_mode mode ATTRIBUTE_UNUSED
,
3978 int opnum
, int type
)
3980 if (!optimize
|| TARGET_LONG_DISPLACEMENT
)
3983 if (GET_CODE (ad
) == PLUS
)
3985 rtx tem
= simplify_binary_operation (PLUS
, Pmode
,
3986 XEXP (ad
, 0), XEXP (ad
, 1));
3991 if (GET_CODE (ad
) == PLUS
3992 && GET_CODE (XEXP (ad
, 0)) == REG
3993 && GET_CODE (XEXP (ad
, 1)) == CONST_INT
3994 && !DISP_IN_RANGE (INTVAL (XEXP (ad
, 1))))
3996 HOST_WIDE_INT lower
= INTVAL (XEXP (ad
, 1)) & 0xfff;
3997 HOST_WIDE_INT upper
= INTVAL (XEXP (ad
, 1)) ^ lower
;
3998 rtx cst
, tem
, new_rtx
;
4000 cst
= GEN_INT (upper
);
4001 if (!legitimate_reload_constant_p (cst
))
4002 cst
= force_const_mem (Pmode
, cst
);
4004 tem
= gen_rtx_PLUS (Pmode
, XEXP (ad
, 0), cst
);
4005 new_rtx
= gen_rtx_PLUS (Pmode
, tem
, GEN_INT (lower
));
4007 push_reload (XEXP (tem
, 1), 0, &XEXP (tem
, 1), 0,
4008 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
4009 opnum
, (enum reload_type
) type
);
/* Emit code to move LEN bytes from SRC to DST.  */
4019 s390_expand_movmem (rtx dst
, rtx src
, rtx len
)
4021 if (GET_CODE (len
) == CONST_INT
&& INTVAL (len
) >= 0 && INTVAL (len
) <= 256)
4023 if (INTVAL (len
) > 0)
4024 emit_insn (gen_movmem_short (dst
, src
, GEN_INT (INTVAL (len
) - 1)));
4027 else if (TARGET_MVCLE
)
4029 emit_insn (gen_movmem_long (dst
, src
, convert_to_mode (Pmode
, len
, 1)));
4034 rtx dst_addr
, src_addr
, count
, blocks
, temp
;
4035 rtx loop_start_label
= gen_label_rtx ();
4036 rtx loop_end_label
= gen_label_rtx ();
4037 rtx end_label
= gen_label_rtx ();
4038 enum machine_mode mode
;
4040 mode
= GET_MODE (len
);
4041 if (mode
== VOIDmode
)
4044 dst_addr
= gen_reg_rtx (Pmode
);
4045 src_addr
= gen_reg_rtx (Pmode
);
4046 count
= gen_reg_rtx (mode
);
4047 blocks
= gen_reg_rtx (mode
);
4049 convert_move (count
, len
, 1);
4050 emit_cmp_and_jump_insns (count
, const0_rtx
,
4051 EQ
, NULL_RTX
, mode
, 1, end_label
);
4053 emit_move_insn (dst_addr
, force_operand (XEXP (dst
, 0), NULL_RTX
));
4054 emit_move_insn (src_addr
, force_operand (XEXP (src
, 0), NULL_RTX
));
4055 dst
= change_address (dst
, VOIDmode
, dst_addr
);
4056 src
= change_address (src
, VOIDmode
, src_addr
);
4058 temp
= expand_binop (mode
, add_optab
, count
, constm1_rtx
, count
, 1,
4061 emit_move_insn (count
, temp
);
4063 temp
= expand_binop (mode
, lshr_optab
, count
, GEN_INT (8), blocks
, 1,
4066 emit_move_insn (blocks
, temp
);
4068 emit_cmp_and_jump_insns (blocks
, const0_rtx
,
4069 EQ
, NULL_RTX
, mode
, 1, loop_end_label
);
4071 emit_label (loop_start_label
);
4074 && (GET_CODE (len
) != CONST_INT
|| INTVAL (len
) > 768))
4078 /* Issue a read prefetch for the +3 cache line. */
4079 prefetch
= gen_prefetch (gen_rtx_PLUS (Pmode
, src_addr
, GEN_INT (768)),
4080 const0_rtx
, const0_rtx
);
4081 PREFETCH_SCHEDULE_BARRIER_P (prefetch
) = true;
4082 emit_insn (prefetch
);
4084 /* Issue a write prefetch for the +3 cache line. */
4085 prefetch
= gen_prefetch (gen_rtx_PLUS (Pmode
, dst_addr
, GEN_INT (768)),
4086 const1_rtx
, const0_rtx
);
4087 PREFETCH_SCHEDULE_BARRIER_P (prefetch
) = true;
4088 emit_insn (prefetch
);
4091 emit_insn (gen_movmem_short (dst
, src
, GEN_INT (255)));
4092 s390_load_address (dst_addr
,
4093 gen_rtx_PLUS (Pmode
, dst_addr
, GEN_INT (256)));
4094 s390_load_address (src_addr
,
4095 gen_rtx_PLUS (Pmode
, src_addr
, GEN_INT (256)));
4097 temp
= expand_binop (mode
, add_optab
, blocks
, constm1_rtx
, blocks
, 1,
4100 emit_move_insn (blocks
, temp
);
4102 emit_cmp_and_jump_insns (blocks
, const0_rtx
,
4103 EQ
, NULL_RTX
, mode
, 1, loop_end_label
);
4105 emit_jump (loop_start_label
);
4106 emit_label (loop_end_label
);
4108 emit_insn (gen_movmem_short (dst
, src
,
4109 convert_to_mode (Pmode
, count
, 1)));
4110 emit_label (end_label
);
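/* Illustrative arithmetic for the loop above (an assumed example, not
   from the original comments): for LEN == 1000 the code computes
   count = 999 and blocks = 999 >> 8 = 3, so the loop issues three
   256-byte MVC blocks (768 bytes) while advancing both addresses, and
   the trailing movmem_short then covers (999 & 255) + 1 = 232 bytes,
   assuming the EXECUTE-based pattern consumes only the low eight bits
   of the length register.  768 + 232 == 1000.  */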
4114 /* Emit code to set LEN bytes at DST to VAL.
4115 Make use of clrmem if VAL is zero. */
4118 s390_expand_setmem (rtx dst
, rtx len
, rtx val
)
4120 if (GET_CODE (len
) == CONST_INT
&& INTVAL (len
) == 0)
4123 gcc_assert (GET_CODE (val
) == CONST_INT
|| GET_MODE (val
) == QImode
);
4125 if (GET_CODE (len
) == CONST_INT
&& INTVAL (len
) > 0 && INTVAL (len
) <= 257)
4127 if (val
== const0_rtx
&& INTVAL (len
) <= 256)
4128 emit_insn (gen_clrmem_short (dst
, GEN_INT (INTVAL (len
) - 1)));
4131 /* Initialize memory by storing the first byte. */
4132 emit_move_insn (adjust_address (dst
, QImode
, 0), val
);
4134 if (INTVAL (len
) > 1)
4136 /* Initiate 1 byte overlap move.
4137 The first byte of DST is propagated through DSTP1.
4138 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4139 DST is set to size 1 so the rest of the memory location
4140 does not count as source operand. */
4141 rtx dstp1
= adjust_address (dst
, VOIDmode
, 1);
4142 set_mem_size (dst
, const1_rtx
);
4144 emit_insn (gen_movmem_short (dstp1
, dst
,
4145 GEN_INT (INTVAL (len
) - 2)));
4150 else if (TARGET_MVCLE
)
4152 val
= force_not_mem (convert_modes (Pmode
, QImode
, val
, 1));
4153 emit_insn (gen_setmem_long (dst
, convert_to_mode (Pmode
, len
, 1), val
));
4158 rtx dst_addr
, count
, blocks
, temp
, dstp1
= NULL_RTX
;
4159 rtx loop_start_label
= gen_label_rtx ();
4160 rtx loop_end_label
= gen_label_rtx ();
4161 rtx end_label
= gen_label_rtx ();
4162 enum machine_mode mode
;
4164 mode
= GET_MODE (len
);
4165 if (mode
== VOIDmode
)
4168 dst_addr
= gen_reg_rtx (Pmode
);
4169 count
= gen_reg_rtx (mode
);
4170 blocks
= gen_reg_rtx (mode
);
4172 convert_move (count
, len
, 1);
4173 emit_cmp_and_jump_insns (count
, const0_rtx
,
4174 EQ
, NULL_RTX
, mode
, 1, end_label
);
4176 emit_move_insn (dst_addr
, force_operand (XEXP (dst
, 0), NULL_RTX
));
4177 dst
= change_address (dst
, VOIDmode
, dst_addr
);
4179 if (val
== const0_rtx
)
4180 temp
= expand_binop (mode
, add_optab
, count
, constm1_rtx
, count
, 1,
4184 dstp1
= adjust_address (dst
, VOIDmode
, 1);
4185 set_mem_size (dst
, const1_rtx
);
4187 /* Initialize memory by storing the first byte. */
4188 emit_move_insn (adjust_address (dst
, QImode
, 0), val
);
4190 /* If count is 1 we are done. */
4191 emit_cmp_and_jump_insns (count
, const1_rtx
,
4192 EQ
, NULL_RTX
, mode
, 1, end_label
);
4194 temp
= expand_binop (mode
, add_optab
, count
, GEN_INT (-2), count
, 1,
4198 emit_move_insn (count
, temp
);
4200 temp
= expand_binop (mode
, lshr_optab
, count
, GEN_INT (8), blocks
, 1,
4203 emit_move_insn (blocks
, temp
);
4205 emit_cmp_and_jump_insns (blocks
, const0_rtx
,
4206 EQ
, NULL_RTX
, mode
, 1, loop_end_label
);
4208 emit_label (loop_start_label
);
4211 && (GET_CODE (len
) != CONST_INT
|| INTVAL (len
) > 1024))
4213 /* Issue a write prefetch for the +4 cache line. */
4214 rtx prefetch
= gen_prefetch (gen_rtx_PLUS (Pmode
, dst_addr
,
4216 const1_rtx
, const0_rtx
);
4217 emit_insn (prefetch
);
4218 PREFETCH_SCHEDULE_BARRIER_P (prefetch
) = true;
4221 if (val
== const0_rtx
)
4222 emit_insn (gen_clrmem_short (dst
, GEN_INT (255)));
4224 emit_insn (gen_movmem_short (dstp1
, dst
, GEN_INT (255)));
4225 s390_load_address (dst_addr
,
4226 gen_rtx_PLUS (Pmode
, dst_addr
, GEN_INT (256)));
4228 temp
= expand_binop (mode
, add_optab
, blocks
, constm1_rtx
, blocks
, 1,
4231 emit_move_insn (blocks
, temp
);
4233 emit_cmp_and_jump_insns (blocks
, const0_rtx
,
4234 EQ
, NULL_RTX
, mode
, 1, loop_end_label
);
4236 emit_jump (loop_start_label
);
4237 emit_label (loop_end_label
);
4239 if (val
== const0_rtx
)
4240 emit_insn (gen_clrmem_short (dst
, convert_to_mode (Pmode
, count
, 1)));
4242 emit_insn (gen_movmem_short (dstp1
, dst
, convert_to_mode (Pmode
, count
, 1)));
4243 emit_label (end_label
);
4247 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4248 and return the result in TARGET. */
4251 s390_expand_cmpmem (rtx target
, rtx op0
, rtx op1
, rtx len
)
4253 rtx ccreg
= gen_rtx_REG (CCUmode
, CC_REGNUM
);
4256 /* As the result of CMPINT is inverted compared to what we need,
4257 we have to swap the operands. */
4258 tmp
= op0
; op0
= op1
; op1
= tmp
;
4260 if (GET_CODE (len
) == CONST_INT
&& INTVAL (len
) >= 0 && INTVAL (len
) <= 256)
4262 if (INTVAL (len
) > 0)
4264 emit_insn (gen_cmpmem_short (op0
, op1
, GEN_INT (INTVAL (len
) - 1)));
4265 emit_insn (gen_cmpint (target
, ccreg
));
4268 emit_move_insn (target
, const0_rtx
);
4270 else if (TARGET_MVCLE
)
4272 emit_insn (gen_cmpmem_long (op0
, op1
, convert_to_mode (Pmode
, len
, 1)));
4273 emit_insn (gen_cmpint (target
, ccreg
));
4277 rtx addr0
, addr1
, count
, blocks
, temp
;
4278 rtx loop_start_label
= gen_label_rtx ();
4279 rtx loop_end_label
= gen_label_rtx ();
4280 rtx end_label
= gen_label_rtx ();
4281 enum machine_mode mode
;
4283 mode
= GET_MODE (len
);
4284 if (mode
== VOIDmode
)
4287 addr0
= gen_reg_rtx (Pmode
);
4288 addr1
= gen_reg_rtx (Pmode
);
4289 count
= gen_reg_rtx (mode
);
4290 blocks
= gen_reg_rtx (mode
);
4292 convert_move (count
, len
, 1);
4293 emit_cmp_and_jump_insns (count
, const0_rtx
,
4294 EQ
, NULL_RTX
, mode
, 1, end_label
);
4296 emit_move_insn (addr0
, force_operand (XEXP (op0
, 0), NULL_RTX
));
4297 emit_move_insn (addr1
, force_operand (XEXP (op1
, 0), NULL_RTX
));
4298 op0
= change_address (op0
, VOIDmode
, addr0
);
4299 op1
= change_address (op1
, VOIDmode
, addr1
);
4301 temp
= expand_binop (mode
, add_optab
, count
, constm1_rtx
, count
, 1,
4304 emit_move_insn (count
, temp
);
4306 temp
= expand_binop (mode
, lshr_optab
, count
, GEN_INT (8), blocks
, 1,
4309 emit_move_insn (blocks
, temp
);
4311 emit_cmp_and_jump_insns (blocks
, const0_rtx
,
4312 EQ
, NULL_RTX
, mode
, 1, loop_end_label
);
4314 emit_label (loop_start_label
);
4317 && (GET_CODE (len
) != CONST_INT
|| INTVAL (len
) > 512))
4321 /* Issue a read prefetch for the +2 cache line of operand 1. */
4322 prefetch
= gen_prefetch (gen_rtx_PLUS (Pmode
, addr0
, GEN_INT (512)),
4323 const0_rtx
, const0_rtx
);
4324 emit_insn (prefetch
);
4325 PREFETCH_SCHEDULE_BARRIER_P (prefetch
) = true;
4327 /* Issue a read prefetch for the +2 cache line of operand 2. */
4328 prefetch
= gen_prefetch (gen_rtx_PLUS (Pmode
, addr1
, GEN_INT (512)),
4329 const0_rtx
, const0_rtx
);
4330 emit_insn (prefetch
);
4331 PREFETCH_SCHEDULE_BARRIER_P (prefetch
) = true;
4334 emit_insn (gen_cmpmem_short (op0
, op1
, GEN_INT (255)));
4335 temp
= gen_rtx_NE (VOIDmode
, ccreg
, const0_rtx
);
4336 temp
= gen_rtx_IF_THEN_ELSE (VOIDmode
, temp
,
4337 gen_rtx_LABEL_REF (VOIDmode
, end_label
), pc_rtx
);
4338 temp
= gen_rtx_SET (VOIDmode
, pc_rtx
, temp
);
4339 emit_jump_insn (temp
);
4341 s390_load_address (addr0
,
4342 gen_rtx_PLUS (Pmode
, addr0
, GEN_INT (256)));
4343 s390_load_address (addr1
,
4344 gen_rtx_PLUS (Pmode
, addr1
, GEN_INT (256)));
4346 temp
= expand_binop (mode
, add_optab
, blocks
, constm1_rtx
, blocks
, 1,
4349 emit_move_insn (blocks
, temp
);
4351 emit_cmp_and_jump_insns (blocks
, const0_rtx
,
4352 EQ
, NULL_RTX
, mode
, 1, loop_end_label
);
4354 emit_jump (loop_start_label
);
4355 emit_label (loop_end_label
);
4357 emit_insn (gen_cmpmem_short (op0
, op1
,
4358 convert_to_mode (Pmode
, count
, 1)));
4359 emit_label (end_label
);
4361 emit_insn (gen_cmpint (target
, ccreg
));
4366 /* Expand conditional increment or decrement using alc/slb instructions.
4367 Should generate code setting DST to either SRC or SRC + INCREMENT,
4368 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4369 Returns true if successful, false otherwise.
4371 That makes it possible to implement some if-constructs without jumps e.g.:
4372 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4373 unsigned int a, b, c;
4374 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4375 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4376 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4377 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4379 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4380 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4381 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4382 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4383 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4386 s390_expand_addcc (enum rtx_code cmp_code
, rtx cmp_op0
, rtx cmp_op1
,
4387 rtx dst
, rtx src
, rtx increment
)
4389 enum machine_mode cmp_mode
;
4390 enum machine_mode cc_mode
;
4396 if ((GET_MODE (cmp_op0
) == SImode
|| GET_MODE (cmp_op0
) == VOIDmode
)
4397 && (GET_MODE (cmp_op1
) == SImode
|| GET_MODE (cmp_op1
) == VOIDmode
))
4399 else if ((GET_MODE (cmp_op0
) == DImode
|| GET_MODE (cmp_op0
) == VOIDmode
)
4400 && (GET_MODE (cmp_op1
) == DImode
|| GET_MODE (cmp_op1
) == VOIDmode
))
4405 /* Try ADD LOGICAL WITH CARRY. */
4406 if (increment
== const1_rtx
)
4408 /* Determine CC mode to use. */
4409 if (cmp_code
== EQ
|| cmp_code
== NE
)
4411 if (cmp_op1
!= const0_rtx
)
4413 cmp_op0
= expand_simple_binop (cmp_mode
, XOR
, cmp_op0
, cmp_op1
,
4414 NULL_RTX
, 0, OPTAB_WIDEN
);
4415 cmp_op1
= const0_rtx
;
4418 cmp_code
= cmp_code
== EQ
? LEU
: GTU
;
4421 if (cmp_code
== LTU
|| cmp_code
== LEU
)
4426 cmp_code
= swap_condition (cmp_code
);
4443 /* Emit comparison instruction pattern. */
4444 if (!register_operand (cmp_op0
, cmp_mode
))
4445 cmp_op0
= force_reg (cmp_mode
, cmp_op0
);
4447 insn
= gen_rtx_SET (VOIDmode
, gen_rtx_REG (cc_mode
, CC_REGNUM
),
4448 gen_rtx_COMPARE (cc_mode
, cmp_op0
, cmp_op1
));
4449 /* We use insn_invalid_p here to add clobbers if required. */
4450 ret
= insn_invalid_p (emit_insn (insn
));
4453 /* Emit ALC instruction pattern. */
4454 op_res
= gen_rtx_fmt_ee (cmp_code
, GET_MODE (dst
),
4455 gen_rtx_REG (cc_mode
, CC_REGNUM
),
4458 if (src
!= const0_rtx
)
4460 if (!register_operand (src
, GET_MODE (dst
)))
4461 src
= force_reg (GET_MODE (dst
), src
);
4463 op_res
= gen_rtx_PLUS (GET_MODE (dst
), op_res
, src
);
4464 op_res
= gen_rtx_PLUS (GET_MODE (dst
), op_res
, const0_rtx
);
4467 p
= rtvec_alloc (2);
4469 gen_rtx_SET (VOIDmode
, dst
, op_res
);
4471 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (CCmode
, CC_REGNUM
));
4472 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
4477 /* Try SUBTRACT LOGICAL WITH BORROW. */
4478 if (increment
== constm1_rtx
)
4480 /* Determine CC mode to use. */
4481 if (cmp_code
== EQ
|| cmp_code
== NE
)
4483 if (cmp_op1
!= const0_rtx
)
4485 cmp_op0
= expand_simple_binop (cmp_mode
, XOR
, cmp_op0
, cmp_op1
,
4486 NULL_RTX
, 0, OPTAB_WIDEN
);
4487 cmp_op1
= const0_rtx
;
4490 cmp_code
= cmp_code
== EQ
? LEU
: GTU
;
4493 if (cmp_code
== GTU
|| cmp_code
== GEU
)
4498 cmp_code
= swap_condition (cmp_code
);
4515 /* Emit comparison instruction pattern. */
4516 if (!register_operand (cmp_op0
, cmp_mode
))
4517 cmp_op0
= force_reg (cmp_mode
, cmp_op0
);
4519 insn
= gen_rtx_SET (VOIDmode
, gen_rtx_REG (cc_mode
, CC_REGNUM
),
4520 gen_rtx_COMPARE (cc_mode
, cmp_op0
, cmp_op1
));
4521 /* We use insn_invalid_p here to add clobbers if required. */
4522 ret
= insn_invalid_p (emit_insn (insn
));
4525 /* Emit SLB instruction pattern. */
4526 if (!register_operand (src
, GET_MODE (dst
)))
4527 src
= force_reg (GET_MODE (dst
), src
);
4529 op_res
= gen_rtx_MINUS (GET_MODE (dst
),
4530 gen_rtx_MINUS (GET_MODE (dst
), src
, const0_rtx
),
4531 gen_rtx_fmt_ee (cmp_code
, GET_MODE (dst
),
4532 gen_rtx_REG (cc_mode
, CC_REGNUM
),
4534 p
= rtvec_alloc (2);
4536 gen_rtx_SET (VOIDmode
, dst
, op_res
);
4538 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (CCmode
, CC_REGNUM
));
4539 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
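/* Rough sketch of what the ALC path above achieves for
   "unsigned int a, b, c; if (a < b) c++;" (a hand-written illustration,
   not exact compiler output): a single compare-logical sets the
   condition code so that the carry is 1 exactly when a < b, and one
   ADD LOGICAL WITH CARRY then computes c = c + 0 + carry, so no branch
   is needed.  The SLB path handled above mirrors this with a borrow
   and SUBTRACT LOGICAL WITH BORROW for the "c--" variants.  */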
/* Expand code for the insv template. Return true if successful.  */

bool
s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
{
  int bitsize = INTVAL (op1);
  int bitpos = INTVAL (op2);

  /* On z10 we can use the risbg instruction to implement insv.  */
  if (TARGET_Z10
      && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
	  || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
    {
      rtx op;
      rtx clobber;

      op = gen_rtx_SET (GET_MODE(src),
			gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
			src);
      clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));

      return true;
    }

  /* We need byte alignment.  */
  if (bitsize % BITS_PER_UNIT)
    return false;

  if (bitpos == 0
      && memory_operand (dest, VOIDmode)
      && (register_operand (src, word_mode)
	  || const_int_operand (src, VOIDmode)))
    {
      /* Emit standard pattern if possible.  */
      enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
      if (GET_MODE_BITSIZE (mode) == bitsize)
	emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));

      /* (set (ze (mem)) (const_int)).  */
      else if (const_int_operand (src, VOIDmode))
	{
	  int size = bitsize / BITS_PER_UNIT;
	  rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
					GET_MODE_SIZE (word_mode) - size);

	  dest = adjust_address (dest, BLKmode, 0);
	  set_mem_size (dest, GEN_INT (size));
	  s390_expand_movmem (dest, src_mem, GEN_INT (size));
	}

      /* (set (ze (mem)) (reg)).  */
      else if (register_operand (src, word_mode))
	{
	  if (bitsize <= GET_MODE_BITSIZE (SImode))
	    emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
						  const0_rtx), src);
	  else
	    {
	      /* Emit st,stcmh sequence.  */
	      int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
	      int size = stcmh_width / BITS_PER_UNIT;

	      emit_move_insn (adjust_address (dest, SImode, size),
			      gen_lowpart (SImode, src));
	      set_mem_size (dest, GEN_INT (size));
	      emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
						    (stcmh_width), const0_rtx),
			      gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
						(GET_MODE_BITSIZE (SImode))));
	    }
	}
      else
	return false;

      return true;
    }

  /* (set (ze (reg)) (const_int)).  */
  if (TARGET_ZARCH
      && register_operand (dest, word_mode)
      && (bitpos % 16) == 0
      && (bitsize % 16) == 0
      && const_int_operand (src, VOIDmode))
    {
      HOST_WIDE_INT val = INTVAL (src);
      int regpos = bitpos + bitsize;

      while (regpos > bitpos)
	{
	  enum machine_mode putmode;
	  int putsize;

	  if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
	    putmode = SImode;
	  else
	    putmode = HImode;

	  putsize = GET_MODE_BITSIZE (putmode);
	  regpos -= putsize;
	  emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
						GEN_INT (putsize),
						GEN_INT (regpos)),
			  gen_int_mode (val, putmode));
	  val >>= putsize;
	}
      gcc_assert (regpos == bitpos);
      return true;
    }

  return false;
}
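
/* Illustrative aside (not part of the original sources): the ZERO_EXTRACT
   stores emitted by s390_expand_insv above amount to the plain-integer
   computation sketched below.  The helper name, the fixed 64-bit width and
   the requirement 1 <= bitsize <= 64 are assumptions made only for this
   sketch; bit positions count from the most significant bit, as they do
   for insv on S/390.  */

static inline unsigned long long
insv_sketch (unsigned long long dest, unsigned long long src,
	     int bitsize, int bitpos)
{
  /* Mask of BITSIZE one bits, positioned BITPOS bits down from the MSB.  */
  unsigned long long mask
    = (~0ULL >> (64 - bitsize)) << (64 - bitpos - bitsize);
  /* Low BITSIZE bits of SRC moved into that position.  */
  unsigned long long field
    = (src << (64 - bitpos - bitsize)) & mask;
  /* Keep DEST outside the field, replace the field itself.  */
  return (dest & ~mask) | field;
}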
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
   register that holds VAL of mode MODE shifted by COUNT bits.  */

static inline rtx
s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
{
  val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
			     NULL_RTX, 1, OPTAB_DIRECT);
  return expand_simple_binop (SImode, ASHIFT, val, count,
			      NULL_RTX, 1, OPTAB_DIRECT);
}
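
/* Illustrative aside (not part of the original sources): on host integers
   the RTL built above reduces to the one-liner below, e.g. a QImode value
   placed 16 bits up in its containing word is (val & 0xff) << 16.  The
   helper name is hypothetical.  */

static inline unsigned int
mask_and_shift_sketch (unsigned int val, unsigned int mode_mask, int count)
{
  return (val & mode_mask) << count;
}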
/* Structure to hold the initial parameters for a compare_and_swap operation
   in HImode and QImode.  */

struct alignment_context
{
  rtx memsi;	  /* SI aligned memory location.  */
  rtx shift;	  /* Bit offset with regard to lsb.  */
  rtx modemask;	  /* Mask of the HQImode shifted by SHIFT bits.  */
  rtx modemaski;  /* ~modemask */
  bool aligned;	  /* True if memory is aligned, false else.  */
};

/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
   structure AC for transparent simplifying, if the memory alignment is known
   to be at least 32bit.  MEM is the memory location for the actual operation
   and MODE its mode.  */
static void
init_alignment_context (struct alignment_context *ac, rtx mem,
			enum machine_mode mode)
{
  ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
  ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));

  if (ac->aligned)
    ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned.  */
  else
    {
      /* Alignment is unknown.  */
      rtx byteoffset, addr, align;

      /* Force the address into a register.  */
      addr = force_reg (Pmode, XEXP (mem, 0));

      /* Align it to SImode.  */
      align = expand_simple_binop (Pmode, AND, addr,
				   GEN_INT (-GET_MODE_SIZE (SImode)),
				   NULL_RTX, 1, OPTAB_DIRECT);
      /* Generate MEM.  */
      ac->memsi = gen_rtx_MEM (SImode, align);
      MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
      set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
      set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));

      /* Calculate shiftcount.  */
      byteoffset = expand_simple_binop (Pmode, AND, addr,
					GEN_INT (GET_MODE_SIZE (SImode) - 1),
					NULL_RTX, 1, OPTAB_DIRECT);
      /* As we already have some offset, evaluate the remaining distance.  */
      ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
				       NULL_RTX, 1, OPTAB_DIRECT);
    }

  /* Shift is the byte count, but we need the bitcount.  */
  ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
				   NULL_RTX, 1, OPTAB_DIRECT);
  /* Calculate masks.  */
  ac->modemask = expand_simple_binop (SImode, ASHIFT,
				      GEN_INT (GET_MODE_MASK (mode)), ac->shift,
				      NULL_RTX, 1, OPTAB_DIRECT);
  ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
}
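
/* Illustrative aside (not part of the original sources): for an HImode
   access at byte address ADDR with unknown alignment, the code above
   computes, in effect, the host-side values below (big-endian byte order,
   4-byte containing word).  The helper name is hypothetical.  */

static inline void
alignment_context_sketch (unsigned long addr, unsigned int *shift,
			  unsigned int *modemask)
{
  unsigned int byteoffset = addr & 3;     /* offset within the SI word        */
  *shift = ((4 - 2) - byteoffset) * 8;    /* bit offset w.r.t. the lsb        */
  *modemask = 0xffffu << *shift;          /* HImode mask shifted into place   */
}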
/* Expand an atomic compare and swap operation for HImode and QImode.  MEM is
   the memory location, CMP the old value to compare MEM with and NEW_RTX the
   value to set if CMP == MEM.
   CMP is never in memory for compare_and_swap_cc because
   expand_bool_compare_and_swap puts it into a register for later compare.  */

void
s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
{
  struct alignment_context ac;
  rtx cmpv, newv, val, resv, cc;
  rtx res = gen_reg_rtx (SImode);
  rtx csloop = gen_label_rtx ();
  rtx csend = gen_label_rtx ();

  gcc_assert (register_operand (target, VOIDmode));
  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Shift the values to the correct bit positions.  */
  if (!(ac.aligned && MEM_P (cmp)))
    cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
  if (!(ac.aligned && MEM_P (new_rtx)))
    new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);

  /* Load full word.  Subsequent loads are performed by CS.  */
  val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
			     NULL_RTX, 1, OPTAB_DIRECT);

  /* Start CS loop.  */
  emit_label (csloop);
  /* val = "<mem>00..0<mem>"
   * cmp = "00..0<cmp>00..0"
   * new = "00..0<new>00..0"
   */

  /* Patch cmp and new with val at correct position.  */
  if (ac.aligned && MEM_P (cmp))
    {
      cmpv = force_reg (SImode, val);
      store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
    }
  else
    cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
						   NULL_RTX, 1, OPTAB_DIRECT));
  if (ac.aligned && MEM_P (new_rtx))
    {
      newv = force_reg (SImode, val);
      store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
    }
  else
    newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
						   NULL_RTX, 1, OPTAB_DIRECT));

  /* Jump to end if we're done (likely?).  */
  s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
						     cmpv, newv));

  /* Check for changes outside mode.  */
  resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
			      NULL_RTX, 1, OPTAB_DIRECT);
  cc = s390_emit_compare (NE, resv, val);
  emit_move_insn (val, resv);
  /* Loop internal if so.  */
  s390_emit_jump (csloop, cc);

  emit_label (csend);

  /* Return the correct part of the bitfield.  */
  convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
					     NULL_RTX, 1, OPTAB_DIRECT), 1);
}
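
/* Illustrative aside (not part of the original sources): the RTL emitted by
   s390_expand_cs_hqi corresponds roughly to the loop below.  The names are
   hypothetical; cs32 stands for the word-sized COMPARE AND SWAP and is
   sketched here with a GCC __sync builtin.  cmp_field and new_field are the
   already masked-and-shifted sub-word values, modemaski masks everything
   outside the field.  */

static inline unsigned int
cs32 (volatile unsigned int *p, unsigned int oldv, unsigned int newv)
{
  /* Returns the word found in *P; equal to OLDV iff the swap succeeded.  */
  return __sync_val_compare_and_swap (p, oldv, newv);
}

static unsigned int
cs_hqi_sketch (volatile unsigned int *memsi, unsigned int cmp_field,
	       unsigned int new_field, unsigned int modemaski, int shift)
{
  unsigned int val = *memsi & modemaski;   /* surrounding bytes only         */
  unsigned int res;

  for (;;)
    {
      unsigned int cmpv = cmp_field | val; /* patch old value into the word  */
      unsigned int newv = new_field | val; /* patch new value into the word  */

      res = cs32 (memsi, cmpv, newv);
      if (res == cmpv)
	break;                             /* swap succeeded                 */
      if ((res & modemaski) == val)
	break;                             /* our field differed: report it  */
      val = res & modemaski;               /* surroundings changed: retry    */
    }
  return res >> shift;                     /* the HImode/QImode part         */
}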
/* Expand an atomic operation CODE of mode MODE.  MEM is the memory location
   and VAL the value to play with.  If AFTER is true then store the value
   MEM holds after the operation, if AFTER is false then store the value MEM
   holds before the operation.  If TARGET is zero then discard that value, else
   store it to TARGET.  */

void
s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
		    rtx target, rtx mem, rtx val, bool after)
{
  struct alignment_context ac;
  rtx cmp;
  rtx new_rtx = gen_reg_rtx (SImode);
  rtx orig = gen_reg_rtx (SImode);
  rtx csloop = gen_label_rtx ();

  gcc_assert (!target || register_operand (target, VOIDmode));
  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Shift val to the correct bit positions.
     Preserve "icm", but prevent "ex icm".  */
  if (!(ac.aligned && code == SET && MEM_P (val)))
    val = s390_expand_mask_and_shift (val, mode, ac.shift);

  /* Further preparation insns.  */
  if (code == PLUS || code == MINUS)
    emit_move_insn (orig, val);
  else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
    val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
			       NULL_RTX, 1, OPTAB_DIRECT);

  /* Load full word.  Subsequent loads are performed by CS.  */
  cmp = force_reg (SImode, ac.memsi);

  /* Start CS loop.  */
  emit_label (csloop);
  emit_move_insn (new_rtx, cmp);

  /* Patch new with val at correct position.  */
  switch (code)
    {
    case PLUS:
    case MINUS:
      val = expand_simple_binop (SImode, code, new_rtx, orig,
				 NULL_RTX, 1, OPTAB_DIRECT);
      val = expand_simple_binop (SImode, AND, val, ac.modemask,
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* FALLTHRU */
    case SET:
      if (ac.aligned && MEM_P (val))
	store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
      else
	{
	  new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
					 NULL_RTX, 1, OPTAB_DIRECT);
	  new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
					 NULL_RTX, 1, OPTAB_DIRECT);
	}
      break;
    case AND:
    case IOR:
    case XOR:
      new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
				     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    case MULT: /* NAND */
      new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
				     NULL_RTX, 1, OPTAB_DIRECT);
      new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
				     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    default:
      gcc_unreachable ();
    }

  s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
						      ac.memsi, cmp, new_rtx));

  /* Return the correct part of the bitfield.  */
  if (target)
    convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
					       after ? new_rtx : cmp, ac.shift,
					       NULL_RTX, 1, OPTAB_DIRECT), 1);
}
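
/* Illustrative aside (not part of the original sources): inside the CS loop
   above, each sub-word update is plain integer arithmetic on the containing
   word.  The helper below shows the NAND case (represented by MULT in the
   expander); val_field is assumed to already be in the "11..1<val>11..1"
   form prepared earlier by XORing with modemaski.  Names are hypothetical.  */

static inline unsigned int
atomic_nand_word_sketch (unsigned int old_word, unsigned int val_field,
			 unsigned int modemask)
{
  unsigned int new_word = old_word & val_field;  /* AND within the field      */
  return new_word ^ modemask;                    /* invert field bits only    */
}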
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;

static void
s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs ("\t.quad\t", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@DTPOFF", file);
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
s390_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
s390_delegitimize_address (rtx orig_x)
{
  rtx x, y;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (GET_CODE (x) != MEM)
    return orig_x;

  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
    {
      y = XEXP (XEXP (x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_GOT)
	return XVECEXP (y, 0, 0);
      return orig_x;
    }

  if (GET_CODE (x) == CONST)
    {
      y = XEXP (x, 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_GOTENT)
	return XVECEXP (y, 0, 0);
      return orig_x;
    }

  return orig_x;
}
/* Output operand OP to stdio stream FILE.
   OP is an address (register + offset) which is not used to address data;
   instead the rightmost bits are interpreted as the value.  */

static void
print_shift_count_operand (FILE *file, rtx op)
{
  HOST_WIDE_INT offset;
  rtx base;

  /* Extract base register and offset.  */
  if (!s390_decompose_shift_count (op, &base, &offset))
    gcc_unreachable ();

  /* Sanity check.  */
  if (base)
    {
      gcc_assert (GET_CODE (base) == REG);
      gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
      gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
    }

  /* Offsets are constricted to twelve bits.  */
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
  if (base)
    fprintf (file, "(%s)", reg_names[REGNO (base)]);
}
/* See 'get_some_local_dynamic_name'.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
    {
      x = get_pool_constant (x);
      return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
    }

  if (GET_CODE (x) == SYMBOL_REF
      && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Output machine-dependent UNSPECs occurring in address constant X
   in assembler syntax to stdio stream FILE.  Returns true if the
   constant X could be recognized, false otherwise.  */

bool
s390_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
    switch (XINT (x, 1))
      {
      case UNSPEC_GOTENT:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@GOTENT");
	return true;
      case UNSPEC_GOT:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@GOT");
	return true;
      case UNSPEC_GOTOFF:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@GOTOFF");
	return true;
      case UNSPEC_PLT:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@PLT");
	return true;
      case UNSPEC_PLTOFF:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@PLTOFF");
	return true;
      case UNSPEC_TLSGD:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@TLSGD");
	return true;
      case UNSPEC_TLSLDM:
	assemble_name (file, get_some_local_dynamic_name ());
	fprintf (file, "@TLSLDM");
	return true;
      case UNSPEC_DTPOFF:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@DTPOFF");
	return true;
      case UNSPEC_NTPOFF:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@NTPOFF");
	return true;
      case UNSPEC_GOTNTPOFF:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@GOTNTPOFF");
	return true;
      case UNSPEC_INDNTPOFF:
	output_addr_const (file, XVECEXP (x, 0, 0));
	fprintf (file, "@INDNTPOFF");
	return true;
      }

  if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
    switch (XINT (x, 1))
      {
      case UNSPEC_POOL_OFFSET:
	x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
	output_addr_const (file, x);
	return true;
      }

  return false;
}
/* Output address operand ADDR in assembler syntax to
   stdio stream FILE.  */

void
print_operand_address (FILE *file, rtx addr)
{
  struct s390_address ad;

  if (s390_symref_operand_p (addr, NULL, NULL))
    {
      gcc_assert (TARGET_Z10);
      output_addr_const (file, addr);
      return;
    }

  if (!s390_decompose_address (addr, &ad)
      || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
      || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
    output_operand_lossage ("cannot decompose address");

  if (ad.disp)
    output_addr_const (file, ad.disp);
  else
    fprintf (file, "0");

  if (ad.base && ad.indx)
    fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
	     reg_names[REGNO (ad.base)]);
  else if (ad.base)
    fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
}
/* Output operand X in assembler syntax to stdio stream FILE.
   CODE specifies the format flag.  The following format flags
   are recognized:

   'C': print opcode suffix for branch condition.
   'D': print opcode suffix for inverse branch condition.
   'E': print opcode suffix for branch on index instruction.
   'J': print tls_load/tls_gdcall/tls_ldcall suffix
   'G': print the size of the operand in bytes.
   'O': print only the displacement of a memory reference.
   'R': print only the base register of a memory reference.
   'S': print S-type memory reference (base+displacement).
   'N': print the second word of a DImode operand.
   'M': print the second word of a TImode operand.
   'Y': print shift count operand.

   'b': print integer X as if it's an unsigned byte.
   'c': print integer X as if it's a signed byte.
   'x': print integer X as if it's an unsigned halfword.
   'h': print integer X as if it's a signed halfword.
   'i': print the first nonzero HImode part of X.
   'j': print the first HImode part unequal to -1 of X.
   'k': print the first nonzero SImode part of X.
   'm': print the first SImode part unequal to -1 of X.
   'o': print integer X as if it's an unsigned 32bit word.  */
5169 print_operand (FILE *file
, rtx x
, int code
)
5174 fprintf (file
, s390_branch_condition_mnemonic (x
, FALSE
));
5178 fprintf (file
, s390_branch_condition_mnemonic (x
, TRUE
));
5182 if (GET_CODE (x
) == LE
)
5183 fprintf (file
, "l");
5184 else if (GET_CODE (x
) == GT
)
5185 fprintf (file
, "h");
5191 if (GET_CODE (x
) == SYMBOL_REF
)
5193 fprintf (file
, "%s", ":tls_load:");
5194 output_addr_const (file
, x
);
5196 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLSGD
)
5198 fprintf (file
, "%s", ":tls_gdcall:");
5199 output_addr_const (file
, XVECEXP (x
, 0, 0));
5201 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLSLDM
)
5203 fprintf (file
, "%s", ":tls_ldcall:");
5204 assemble_name (file
, get_some_local_dynamic_name ());
5211 fprintf (file
, "%u", GET_MODE_SIZE (GET_MODE (x
)));
5216 struct s390_address ad
;
5219 gcc_assert (GET_CODE (x
) == MEM
);
5220 ret
= s390_decompose_address (XEXP (x
, 0), &ad
);
5222 gcc_assert (!ad
.base
|| REGNO_OK_FOR_BASE_P (REGNO (ad
.base
)));
5223 gcc_assert (!ad
.indx
);
5226 output_addr_const (file
, ad
.disp
);
5228 fprintf (file
, "0");
5234 struct s390_address ad
;
5237 gcc_assert (GET_CODE (x
) == MEM
);
5238 ret
= s390_decompose_address (XEXP (x
, 0), &ad
);
5240 gcc_assert (!ad
.base
|| REGNO_OK_FOR_BASE_P (REGNO (ad
.base
)));
5241 gcc_assert (!ad
.indx
);
5244 fprintf (file
, "%s", reg_names
[REGNO (ad
.base
)]);
5246 fprintf (file
, "0");
5252 struct s390_address ad
;
5255 gcc_assert (GET_CODE (x
) == MEM
);
5256 ret
= s390_decompose_address (XEXP (x
, 0), &ad
);
5258 gcc_assert (!ad
.base
|| REGNO_OK_FOR_BASE_P (REGNO (ad
.base
)));
5259 gcc_assert (!ad
.indx
);
5262 output_addr_const (file
, ad
.disp
);
5264 fprintf (file
, "0");
5267 fprintf (file
, "(%s)", reg_names
[REGNO (ad
.base
)]);
5272 if (GET_CODE (x
) == REG
)
5273 x
= gen_rtx_REG (GET_MODE (x
), REGNO (x
) + 1);
5274 else if (GET_CODE (x
) == MEM
)
5275 x
= change_address (x
, VOIDmode
, plus_constant (XEXP (x
, 0), 4));
5281 if (GET_CODE (x
) == REG
)
5282 x
= gen_rtx_REG (GET_MODE (x
), REGNO (x
) + 1);
5283 else if (GET_CODE (x
) == MEM
)
5284 x
= change_address (x
, VOIDmode
, plus_constant (XEXP (x
, 0), 8));
5290 print_shift_count_operand (file
, x
);
5294 switch (GET_CODE (x
))
5297 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5301 output_address (XEXP (x
, 0));
5308 output_addr_const (file
, x
);
5313 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 0xff);
5314 else if (code
== 'c')
5315 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ((INTVAL (x
) & 0xff) ^ 0x80) - 0x80);
5316 else if (code
== 'x')
5317 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 0xffff);
5318 else if (code
== 'h')
5319 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ((INTVAL (x
) & 0xffff) ^ 0x8000) - 0x8000);
5320 else if (code
== 'i')
5321 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5322 s390_extract_part (x
, HImode
, 0));
5323 else if (code
== 'j')
5324 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5325 s390_extract_part (x
, HImode
, -1));
5326 else if (code
== 'k')
5327 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5328 s390_extract_part (x
, SImode
, 0));
5329 else if (code
== 'm')
5330 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5331 s390_extract_part (x
, SImode
, -1));
5332 else if (code
== 'o')
5333 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 0xffffffff);
5335 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
));
5339 gcc_assert (GET_MODE (x
) == VOIDmode
);
5341 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, CONST_DOUBLE_LOW (x
) & 0xff);
5342 else if (code
== 'x')
5343 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, CONST_DOUBLE_LOW (x
) & 0xffff);
5344 else if (code
== 'h')
5345 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ((CONST_DOUBLE_LOW (x
) & 0xffff) ^ 0x8000) - 0x8000);
5351 fatal_insn ("UNKNOWN in print_operand !?", x
);
/* Target hook for assembling integer objects.  We need to define it
   here to work around a bug in some versions of GAS, which couldn't
   handle values smaller than INT_MIN when printed in decimal.  */

static bool
s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == 8 && aligned_p
      && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
    {
      fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
	       INTVAL (x));
      return true;
    }

  return default_assemble_integer (x, size, aligned_p);
}
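
/* Illustrative aside (not part of the original sources): the hook above only
   switches to a hexadecimal .quad for 64-bit constants below INT_MIN.  A
   host-side sketch of the two output forms follows; the helper name is
   hypothetical, and FILE/fprintf are assumed to come from <stdio.h>, which
   the surrounding GCC headers already pull in.  */

static void
assemble_integer_sketch (FILE *f, long long v)
{
  if (v < (-2147483647LL - 1))
    fprintf (f, "\t.quad\t0x%llx\n", (unsigned long long) v); /* hex workaround */
  else
    fprintf (f, "\t.quad\t%lld\n", v);                        /* normal decimal */
}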
/* Returns true if register REGNO is used for forming
   a memory address in expression X.  */

static bool
reg_used_in_mem_p (int regno, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  if (code == MEM)
    {
      if (refers_to_regno_p (regno, regno+1,
			     XEXP (x, 0), 0))
	return true;
    }
  else if (code == SET
	   && GET_CODE (SET_DEST (x)) == PC)
    {
      if (refers_to_regno_p (regno, regno+1,
			     SET_SRC (x), 0))
	return true;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && reg_used_in_mem_p (regno, XEXP (x, i)))
	return true;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
	    return true;
    }
  return false;
}
/* Returns true if expression DEP_RTX sets an address register
   used by instruction INSN to address memory.  */

static bool
addr_generation_dependency_p (rtx dep_rtx, rtx insn)
{
  rtx target, pat;

  if (GET_CODE (dep_rtx) == INSN)
    dep_rtx = PATTERN (dep_rtx);

  if (GET_CODE (dep_rtx) == SET)
    {
      target = SET_DEST (dep_rtx);
      if (GET_CODE (target) == STRICT_LOW_PART)
	target = XEXP (target, 0);
      while (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (GET_CODE (target) == REG)
	{
	  int regno = REGNO (target);

	  if (s390_safe_attr_type (insn) == TYPE_LA)
	    {
	      pat = PATTERN (insn);
	      if (GET_CODE (pat) == PARALLEL)
		{
		  gcc_assert (XVECLEN (pat, 0) == 2);
		  pat = XVECEXP (pat, 0, 0);
		}
	      gcc_assert (GET_CODE (pat) == SET);
	      return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
	    }
	  else if (get_attr_atype (insn) == ATYPE_AGEN)
	    return reg_used_in_mem_p (regno, PATTERN (insn));
	}
    }
  return false;
}
/* Return 1, if dep_insn sets register used in insn in the agen unit.  */

int
s390_agen_dep_p (rtx dep_insn, rtx insn)
{
  rtx dep_rtx = PATTERN (dep_insn);
  int i;

  if (GET_CODE (dep_rtx) == SET
      && addr_generation_dependency_p (dep_rtx, insn))
    return 1;
  else if (GET_CODE (dep_rtx) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
	{
	  if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
	    return 1;
	}
    }
  return 0;
}
/* A C statement (sans semicolon) to update the integer scheduling priority
   INSN_PRIORITY (INSN).  Increase the priority to execute the INSN earlier,
   reduce the priority to execute INSN later.  Do not define this macro if
   you do not need to adjust the scheduling priorities of insns.

   A STD instruction should be scheduled earlier,
   in order to use the bypass.  */

static int
s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  if (! INSN_P (insn))
    return priority;

  if (s390_tune != PROCESSOR_2084_Z990
      && s390_tune != PROCESSOR_2094_Z9_109
      && s390_tune != PROCESSOR_2097_Z10
      && s390_tune != PROCESSOR_2817_Z196)
    return priority;

  switch (s390_safe_attr_type (insn))
    {
      case TYPE_FSTOREDF:
      case TYPE_FSTORESF:
	priority = priority << 3;
	break;
      case TYPE_STORE:
      case TYPE_STM:
	priority = priority << 1;
	break;
      default:
	break;
    }
  return priority;
}
/* The number of instructions that can be issued per cycle.  */

static int
s390_issue_rate (void)
{
  switch (s390_tune)
    {
    case PROCESSOR_2084_Z990:
    case PROCESSOR_2094_Z9_109:
    case PROCESSOR_2817_Z196:
      return 3;
    case PROCESSOR_2097_Z10:
      return 2;
    default:
      return 1;
    }
}

static int
s390_first_cycle_multipass_dfa_lookahead (void)
{
  return 4;
}
5536 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5537 Fix up MEMs as required. */
5540 annotate_constant_pool_refs (rtx
*x
)
5545 gcc_assert (GET_CODE (*x
) != SYMBOL_REF
5546 || !CONSTANT_POOL_ADDRESS_P (*x
));
5548 /* Literal pool references can only occur inside a MEM ... */
5549 if (GET_CODE (*x
) == MEM
)
5551 rtx memref
= XEXP (*x
, 0);
5553 if (GET_CODE (memref
) == SYMBOL_REF
5554 && CONSTANT_POOL_ADDRESS_P (memref
))
5556 rtx base
= cfun
->machine
->base_reg
;
5557 rtx addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, memref
, base
),
5560 *x
= replace_equiv_address (*x
, addr
);
5564 if (GET_CODE (memref
) == CONST
5565 && GET_CODE (XEXP (memref
, 0)) == PLUS
5566 && GET_CODE (XEXP (XEXP (memref
, 0), 1)) == CONST_INT
5567 && GET_CODE (XEXP (XEXP (memref
, 0), 0)) == SYMBOL_REF
5568 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref
, 0), 0)))
5570 HOST_WIDE_INT off
= INTVAL (XEXP (XEXP (memref
, 0), 1));
5571 rtx sym
= XEXP (XEXP (memref
, 0), 0);
5572 rtx base
= cfun
->machine
->base_reg
;
5573 rtx addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, sym
, base
),
5576 *x
= replace_equiv_address (*x
, plus_constant (addr
, off
));
5581 /* ... or a load-address type pattern. */
5582 if (GET_CODE (*x
) == SET
)
5584 rtx addrref
= SET_SRC (*x
);
5586 if (GET_CODE (addrref
) == SYMBOL_REF
5587 && CONSTANT_POOL_ADDRESS_P (addrref
))
5589 rtx base
= cfun
->machine
->base_reg
;
5590 rtx addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, addrref
, base
),
5593 SET_SRC (*x
) = addr
;
5597 if (GET_CODE (addrref
) == CONST
5598 && GET_CODE (XEXP (addrref
, 0)) == PLUS
5599 && GET_CODE (XEXP (XEXP (addrref
, 0), 1)) == CONST_INT
5600 && GET_CODE (XEXP (XEXP (addrref
, 0), 0)) == SYMBOL_REF
5601 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref
, 0), 0)))
5603 HOST_WIDE_INT off
= INTVAL (XEXP (XEXP (addrref
, 0), 1));
5604 rtx sym
= XEXP (XEXP (addrref
, 0), 0);
5605 rtx base
= cfun
->machine
->base_reg
;
5606 rtx addr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, sym
, base
),
5609 SET_SRC (*x
) = plus_constant (addr
, off
);
5614 /* Annotate LTREL_BASE as well. */
5615 if (GET_CODE (*x
) == UNSPEC
5616 && XINT (*x
, 1) == UNSPEC_LTREL_BASE
)
5618 rtx base
= cfun
->machine
->base_reg
;
5619 *x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, XVECEXP (*x
, 0, 0), base
),
5624 fmt
= GET_RTX_FORMAT (GET_CODE (*x
));
5625 for (i
= GET_RTX_LENGTH (GET_CODE (*x
)) - 1; i
>= 0; i
--)
5629 annotate_constant_pool_refs (&XEXP (*x
, i
));
5631 else if (fmt
[i
] == 'E')
5633 for (j
= 0; j
< XVECLEN (*x
, i
); j
++)
5634 annotate_constant_pool_refs (&XVECEXP (*x
, i
, j
));
5639 /* Split all branches that exceed the maximum distance.
5640 Returns true if this created a new literal pool entry. */
5643 s390_split_branches (void)
5645 rtx temp_reg
= gen_rtx_REG (Pmode
, RETURN_REGNUM
);
5646 int new_literal
= 0, ret
;
5647 rtx insn
, pat
, tmp
, target
;
5650 /* We need correct insn addresses. */
5652 shorten_branches (get_insns ());
5654 /* Find all branches that exceed 64KB, and split them. */
5656 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
5658 if (GET_CODE (insn
) != JUMP_INSN
)
5661 pat
= PATTERN (insn
);
5662 if (GET_CODE (pat
) == PARALLEL
&& XVECLEN (pat
, 0) > 2)
5663 pat
= XVECEXP (pat
, 0, 0);
5664 if (GET_CODE (pat
) != SET
|| SET_DEST (pat
) != pc_rtx
)
5667 if (GET_CODE (SET_SRC (pat
)) == LABEL_REF
)
5669 label
= &SET_SRC (pat
);
5671 else if (GET_CODE (SET_SRC (pat
)) == IF_THEN_ELSE
)
5673 if (GET_CODE (XEXP (SET_SRC (pat
), 1)) == LABEL_REF
)
5674 label
= &XEXP (SET_SRC (pat
), 1);
5675 else if (GET_CODE (XEXP (SET_SRC (pat
), 2)) == LABEL_REF
)
5676 label
= &XEXP (SET_SRC (pat
), 2);
5683 if (get_attr_length (insn
) <= 4)
5686 /* We are going to use the return register as scratch register,
5687 make sure it will be saved/restored by the prologue/epilogue. */
5688 cfun_frame_layout
.save_return_addr_p
= 1;
5693 tmp
= force_const_mem (Pmode
, *label
);
5694 tmp
= emit_insn_before (gen_rtx_SET (Pmode
, temp_reg
, tmp
), insn
);
5695 INSN_ADDRESSES_NEW (tmp
, -1);
5696 annotate_constant_pool_refs (&PATTERN (tmp
));
5703 target
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, *label
),
5704 UNSPEC_LTREL_OFFSET
);
5705 target
= gen_rtx_CONST (Pmode
, target
);
5706 target
= force_const_mem (Pmode
, target
);
5707 tmp
= emit_insn_before (gen_rtx_SET (Pmode
, temp_reg
, target
), insn
);
5708 INSN_ADDRESSES_NEW (tmp
, -1);
5709 annotate_constant_pool_refs (&PATTERN (tmp
));
5711 target
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, XEXP (target
, 0),
5712 cfun
->machine
->base_reg
),
5714 target
= gen_rtx_PLUS (Pmode
, temp_reg
, target
);
5717 ret
= validate_change (insn
, label
, target
, 0);
5725 /* Find an annotated literal pool symbol referenced in RTX X,
5726 and store it at REF. Will abort if X contains references to
5727 more than one such pool symbol; multiple references to the same
5728 symbol are allowed, however.
5730 The rtx pointed to by REF must be initialized to NULL_RTX
5731 by the caller before calling this routine. */
5734 find_constant_pool_ref (rtx x
, rtx
*ref
)
5739 /* Ignore LTREL_BASE references. */
5740 if (GET_CODE (x
) == UNSPEC
5741 && XINT (x
, 1) == UNSPEC_LTREL_BASE
)
5743 /* Likewise POOL_ENTRY insns. */
5744 if (GET_CODE (x
) == UNSPEC_VOLATILE
5745 && XINT (x
, 1) == UNSPECV_POOL_ENTRY
)
5748 gcc_assert (GET_CODE (x
) != SYMBOL_REF
5749 || !CONSTANT_POOL_ADDRESS_P (x
));
5751 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_LTREF
)
5753 rtx sym
= XVECEXP (x
, 0, 0);
5754 gcc_assert (GET_CODE (sym
) == SYMBOL_REF
5755 && CONSTANT_POOL_ADDRESS_P (sym
));
5757 if (*ref
== NULL_RTX
)
5760 gcc_assert (*ref
== sym
);
5765 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
5766 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
5770 find_constant_pool_ref (XEXP (x
, i
), ref
);
5772 else if (fmt
[i
] == 'E')
5774 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
5775 find_constant_pool_ref (XVECEXP (x
, i
, j
), ref
);
5780 /* Replace every reference to the annotated literal pool
5781 symbol REF in X by its base plus OFFSET. */
5784 replace_constant_pool_ref (rtx
*x
, rtx ref
, rtx offset
)
5789 gcc_assert (*x
!= ref
);
5791 if (GET_CODE (*x
) == UNSPEC
5792 && XINT (*x
, 1) == UNSPEC_LTREF
5793 && XVECEXP (*x
, 0, 0) == ref
)
5795 *x
= gen_rtx_PLUS (Pmode
, XVECEXP (*x
, 0, 1), offset
);
5799 if (GET_CODE (*x
) == PLUS
5800 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
5801 && GET_CODE (XEXP (*x
, 0)) == UNSPEC
5802 && XINT (XEXP (*x
, 0), 1) == UNSPEC_LTREF
5803 && XVECEXP (XEXP (*x
, 0), 0, 0) == ref
)
5805 rtx addr
= gen_rtx_PLUS (Pmode
, XVECEXP (XEXP (*x
, 0), 0, 1), offset
);
5806 *x
= plus_constant (addr
, INTVAL (XEXP (*x
, 1)));
5810 fmt
= GET_RTX_FORMAT (GET_CODE (*x
));
5811 for (i
= GET_RTX_LENGTH (GET_CODE (*x
)) - 1; i
>= 0; i
--)
5815 replace_constant_pool_ref (&XEXP (*x
, i
), ref
, offset
);
5817 else if (fmt
[i
] == 'E')
5819 for (j
= 0; j
< XVECLEN (*x
, i
); j
++)
5820 replace_constant_pool_ref (&XVECEXP (*x
, i
, j
), ref
, offset
);
5825 /* Check whether X contains an UNSPEC_LTREL_BASE.
5826 Return its constant pool symbol if found, NULL_RTX otherwise. */
5829 find_ltrel_base (rtx x
)
5834 if (GET_CODE (x
) == UNSPEC
5835 && XINT (x
, 1) == UNSPEC_LTREL_BASE
)
5836 return XVECEXP (x
, 0, 0);
5838 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
5839 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
5843 rtx fnd
= find_ltrel_base (XEXP (x
, i
));
5847 else if (fmt
[i
] == 'E')
5849 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
5851 rtx fnd
= find_ltrel_base (XVECEXP (x
, i
, j
));
5861 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5864 replace_ltrel_base (rtx
*x
)
5869 if (GET_CODE (*x
) == UNSPEC
5870 && XINT (*x
, 1) == UNSPEC_LTREL_BASE
)
5872 *x
= XVECEXP (*x
, 0, 1);
5876 fmt
= GET_RTX_FORMAT (GET_CODE (*x
));
5877 for (i
= GET_RTX_LENGTH (GET_CODE (*x
)) - 1; i
>= 0; i
--)
5881 replace_ltrel_base (&XEXP (*x
, i
));
5883 else if (fmt
[i
] == 'E')
5885 for (j
= 0; j
< XVECLEN (*x
, i
); j
++)
5886 replace_ltrel_base (&XVECEXP (*x
, i
, j
));
5892 /* We keep a list of constants which we have to add to internal
5893 constant tables in the middle of large functions. */
5895 #define NR_C_MODES 11
5896 enum machine_mode constant_modes
[NR_C_MODES
] =
5898 TFmode
, TImode
, TDmode
,
5899 DFmode
, DImode
, DDmode
,
5900 SFmode
, SImode
, SDmode
,
5907 struct constant
*next
;
5912 struct constant_pool
5914 struct constant_pool
*next
;
5918 rtx emit_pool_after
;
5920 struct constant
*constants
[NR_C_MODES
];
5921 struct constant
*execute
;
5926 /* Allocate new constant_pool structure. */
5928 static struct constant_pool
*
5929 s390_alloc_pool (void)
5931 struct constant_pool
*pool
;
5934 pool
= (struct constant_pool
*) xmalloc (sizeof *pool
);
5936 for (i
= 0; i
< NR_C_MODES
; i
++)
5937 pool
->constants
[i
] = NULL
;
5939 pool
->execute
= NULL
;
5940 pool
->label
= gen_label_rtx ();
5941 pool
->first_insn
= NULL_RTX
;
5942 pool
->pool_insn
= NULL_RTX
;
5943 pool
->insns
= BITMAP_ALLOC (NULL
);
5945 pool
->emit_pool_after
= NULL_RTX
;
5950 /* Create new constant pool covering instructions starting at INSN
5951 and chain it to the end of POOL_LIST. */
5953 static struct constant_pool
*
5954 s390_start_pool (struct constant_pool
**pool_list
, rtx insn
)
5956 struct constant_pool
*pool
, **prev
;
5958 pool
= s390_alloc_pool ();
5959 pool
->first_insn
= insn
;
5961 for (prev
= pool_list
; *prev
; prev
= &(*prev
)->next
)
5968 /* End range of instructions covered by POOL at INSN and emit
5969 placeholder insn representing the pool. */
5972 s390_end_pool (struct constant_pool
*pool
, rtx insn
)
5974 rtx pool_size
= GEN_INT (pool
->size
+ 8 /* alignment slop */);
5977 insn
= get_last_insn ();
5979 pool
->pool_insn
= emit_insn_after (gen_pool (pool_size
), insn
);
5980 INSN_ADDRESSES_NEW (pool
->pool_insn
, -1);
5983 /* Add INSN to the list of insns covered by POOL. */
5986 s390_add_pool_insn (struct constant_pool
*pool
, rtx insn
)
5988 bitmap_set_bit (pool
->insns
, INSN_UID (insn
));
5991 /* Return pool out of POOL_LIST that covers INSN. */
5993 static struct constant_pool
*
5994 s390_find_pool (struct constant_pool
*pool_list
, rtx insn
)
5996 struct constant_pool
*pool
;
5998 for (pool
= pool_list
; pool
; pool
= pool
->next
)
5999 if (bitmap_bit_p (pool
->insns
, INSN_UID (insn
)))
6005 /* Add constant VAL of mode MODE to the constant pool POOL. */
6008 s390_add_constant (struct constant_pool
*pool
, rtx val
, enum machine_mode mode
)
6013 for (i
= 0; i
< NR_C_MODES
; i
++)
6014 if (constant_modes
[i
] == mode
)
6016 gcc_assert (i
!= NR_C_MODES
);
6018 for (c
= pool
->constants
[i
]; c
!= NULL
; c
= c
->next
)
6019 if (rtx_equal_p (val
, c
->value
))
6024 c
= (struct constant
*) xmalloc (sizeof *c
);
6026 c
->label
= gen_label_rtx ();
6027 c
->next
= pool
->constants
[i
];
6028 pool
->constants
[i
] = c
;
6029 pool
->size
+= GET_MODE_SIZE (mode
);
6033 /* Return an rtx that represents the offset of X from the start of
6037 s390_pool_offset (struct constant_pool
*pool
, rtx x
)
6041 label
= gen_rtx_LABEL_REF (GET_MODE (x
), pool
->label
);
6042 x
= gen_rtx_UNSPEC (GET_MODE (x
), gen_rtvec (2, x
, label
),
6043 UNSPEC_POOL_OFFSET
);
6044 return gen_rtx_CONST (GET_MODE (x
), x
);
6047 /* Find constant VAL of mode MODE in the constant pool POOL.
6048 Return an RTX describing the distance from the start of
6049 the pool to the location of the new constant. */
6052 s390_find_constant (struct constant_pool
*pool
, rtx val
,
6053 enum machine_mode mode
)
6058 for (i
= 0; i
< NR_C_MODES
; i
++)
6059 if (constant_modes
[i
] == mode
)
6061 gcc_assert (i
!= NR_C_MODES
);
6063 for (c
= pool
->constants
[i
]; c
!= NULL
; c
= c
->next
)
6064 if (rtx_equal_p (val
, c
->value
))
6069 return s390_pool_offset (pool
, gen_rtx_LABEL_REF (Pmode
, c
->label
));
6072 /* Check whether INSN is an execute. Return the label_ref to its
6073 execute target template if so, NULL_RTX otherwise. */
6076 s390_execute_label (rtx insn
)
6078 if (GET_CODE (insn
) == INSN
6079 && GET_CODE (PATTERN (insn
)) == PARALLEL
6080 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == UNSPEC
6081 && XINT (XVECEXP (PATTERN (insn
), 0, 0), 1) == UNSPEC_EXECUTE
)
6082 return XVECEXP (XVECEXP (PATTERN (insn
), 0, 0), 0, 2);
6087 /* Add execute target for INSN to the constant pool POOL. */
6090 s390_add_execute (struct constant_pool
*pool
, rtx insn
)
6094 for (c
= pool
->execute
; c
!= NULL
; c
= c
->next
)
6095 if (INSN_UID (insn
) == INSN_UID (c
->value
))
6100 c
= (struct constant
*) xmalloc (sizeof *c
);
6102 c
->label
= gen_label_rtx ();
6103 c
->next
= pool
->execute
;
6109 /* Find execute target for INSN in the constant pool POOL.
6110 Return an RTX describing the distance from the start of
6111 the pool to the location of the execute target. */
6114 s390_find_execute (struct constant_pool
*pool
, rtx insn
)
6118 for (c
= pool
->execute
; c
!= NULL
; c
= c
->next
)
6119 if (INSN_UID (insn
) == INSN_UID (c
->value
))
6124 return s390_pool_offset (pool
, gen_rtx_LABEL_REF (Pmode
, c
->label
));
6127 /* For an execute INSN, extract the execute target template. */
6130 s390_execute_target (rtx insn
)
6132 rtx pattern
= PATTERN (insn
);
6133 gcc_assert (s390_execute_label (insn
));
6135 if (XVECLEN (pattern
, 0) == 2)
6137 pattern
= copy_rtx (XVECEXP (pattern
, 0, 1));
6141 rtvec vec
= rtvec_alloc (XVECLEN (pattern
, 0) - 1);
6144 for (i
= 0; i
< XVECLEN (pattern
, 0) - 1; i
++)
6145 RTVEC_ELT (vec
, i
) = copy_rtx (XVECEXP (pattern
, 0, i
+ 1));
6147 pattern
= gen_rtx_PARALLEL (VOIDmode
, vec
);
/* Indicate that INSN cannot be duplicated.  This is the case for
   execute insns that carry a unique label.  */

static bool
s390_cannot_copy_insn_p (rtx insn)
{
  rtx label = s390_execute_label (insn);
  return label && label != const0_rtx;
}
6163 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6164 do not emit the pool base label. */
6167 s390_dump_pool (struct constant_pool
*pool
, bool remote_label
)
6170 rtx insn
= pool
->pool_insn
;
6173 /* Switch to rodata section. */
6174 if (TARGET_CPU_ZARCH
)
6176 insn
= emit_insn_after (gen_pool_section_start (), insn
);
6177 INSN_ADDRESSES_NEW (insn
, -1);
6180 /* Ensure minimum pool alignment. */
6181 if (TARGET_CPU_ZARCH
)
6182 insn
= emit_insn_after (gen_pool_align (GEN_INT (8)), insn
);
6184 insn
= emit_insn_after (gen_pool_align (GEN_INT (4)), insn
);
6185 INSN_ADDRESSES_NEW (insn
, -1);
6187 /* Emit pool base label. */
6190 insn
= emit_label_after (pool
->label
, insn
);
6191 INSN_ADDRESSES_NEW (insn
, -1);
6194 /* Dump constants in descending alignment requirement order,
6195 ensuring proper alignment for every constant. */
6196 for (i
= 0; i
< NR_C_MODES
; i
++)
6197 for (c
= pool
->constants
[i
]; c
; c
= c
->next
)
6199 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6200 rtx value
= copy_rtx (c
->value
);
6201 if (GET_CODE (value
) == CONST
6202 && GET_CODE (XEXP (value
, 0)) == UNSPEC
6203 && XINT (XEXP (value
, 0), 1) == UNSPEC_LTREL_OFFSET
6204 && XVECLEN (XEXP (value
, 0), 0) == 1)
6205 value
= s390_pool_offset (pool
, XVECEXP (XEXP (value
, 0), 0, 0));
6207 insn
= emit_label_after (c
->label
, insn
);
6208 INSN_ADDRESSES_NEW (insn
, -1);
6210 value
= gen_rtx_UNSPEC_VOLATILE (constant_modes
[i
],
6211 gen_rtvec (1, value
),
6212 UNSPECV_POOL_ENTRY
);
6213 insn
= emit_insn_after (value
, insn
);
6214 INSN_ADDRESSES_NEW (insn
, -1);
6217 /* Ensure minimum alignment for instructions. */
6218 insn
= emit_insn_after (gen_pool_align (GEN_INT (2)), insn
);
6219 INSN_ADDRESSES_NEW (insn
, -1);
6221 /* Output in-pool execute template insns. */
6222 for (c
= pool
->execute
; c
; c
= c
->next
)
6224 insn
= emit_label_after (c
->label
, insn
);
6225 INSN_ADDRESSES_NEW (insn
, -1);
6227 insn
= emit_insn_after (s390_execute_target (c
->value
), insn
);
6228 INSN_ADDRESSES_NEW (insn
, -1);
6231 /* Switch back to previous section. */
6232 if (TARGET_CPU_ZARCH
)
6234 insn
= emit_insn_after (gen_pool_section_end (), insn
);
6235 INSN_ADDRESSES_NEW (insn
, -1);
6238 insn
= emit_barrier_after (insn
);
6239 INSN_ADDRESSES_NEW (insn
, -1);
6241 /* Remove placeholder insn. */
6242 remove_insn (pool
->pool_insn
);
6245 /* Free all memory used by POOL. */
6248 s390_free_pool (struct constant_pool
*pool
)
6250 struct constant
*c
, *next
;
6253 for (i
= 0; i
< NR_C_MODES
; i
++)
6254 for (c
= pool
->constants
[i
]; c
; c
= next
)
6260 for (c
= pool
->execute
; c
; c
= next
)
6266 BITMAP_FREE (pool
->insns
);
6271 /* Collect main literal pool. Return NULL on overflow. */
6273 static struct constant_pool
*
6274 s390_mainpool_start (void)
6276 struct constant_pool
*pool
;
6279 pool
= s390_alloc_pool ();
6281 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6283 if (GET_CODE (insn
) == INSN
6284 && GET_CODE (PATTERN (insn
)) == SET
6285 && GET_CODE (SET_SRC (PATTERN (insn
))) == UNSPEC_VOLATILE
6286 && XINT (SET_SRC (PATTERN (insn
)), 1) == UNSPECV_MAIN_POOL
)
6288 gcc_assert (!pool
->pool_insn
);
6289 pool
->pool_insn
= insn
;
6292 if (!TARGET_CPU_ZARCH
&& s390_execute_label (insn
))
6294 s390_add_execute (pool
, insn
);
6296 else if (GET_CODE (insn
) == INSN
|| GET_CODE (insn
) == CALL_INSN
)
6298 rtx pool_ref
= NULL_RTX
;
6299 find_constant_pool_ref (PATTERN (insn
), &pool_ref
);
6302 rtx constant
= get_pool_constant (pool_ref
);
6303 enum machine_mode mode
= get_pool_mode (pool_ref
);
6304 s390_add_constant (pool
, constant
, mode
);
6308 /* If hot/cold partitioning is enabled we have to make sure that
6309 the literal pool is emitted in the same section where the
6310 initialization of the literal pool base pointer takes place.
6311 emit_pool_after is only used in the non-overflow case on non
6312 Z cpus where we can emit the literal pool at the end of the
6313 function body within the text section. */
6315 && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6316 && !pool
->emit_pool_after
)
6317 pool
->emit_pool_after
= PREV_INSN (insn
);
6320 gcc_assert (pool
->pool_insn
|| pool
->size
== 0);
6322 if (pool
->size
>= 4096)
6324 /* We're going to chunkify the pool, so remove the main
6325 pool placeholder insn. */
6326 remove_insn (pool
->pool_insn
);
6328 s390_free_pool (pool
);
6332 /* If the functions ends with the section where the literal pool
6333 should be emitted set the marker to its end. */
6334 if (pool
&& !pool
->emit_pool_after
)
6335 pool
->emit_pool_after
= get_last_insn ();
6340 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6341 Modify the current function to output the pool constants as well as
6342 the pool register setup instruction. */
6345 s390_mainpool_finish (struct constant_pool
*pool
)
6347 rtx base_reg
= cfun
->machine
->base_reg
;
6350 /* If the pool is empty, we're done. */
6351 if (pool
->size
== 0)
6353 /* We don't actually need a base register after all. */
6354 cfun
->machine
->base_reg
= NULL_RTX
;
6356 if (pool
->pool_insn
)
6357 remove_insn (pool
->pool_insn
);
6358 s390_free_pool (pool
);
6362 /* We need correct insn addresses. */
6363 shorten_branches (get_insns ());
6365 /* On zSeries, we use a LARL to load the pool register. The pool is
6366 located in the .rodata section, so we emit it after the function. */
6367 if (TARGET_CPU_ZARCH
)
6369 insn
= gen_main_base_64 (base_reg
, pool
->label
);
6370 insn
= emit_insn_after (insn
, pool
->pool_insn
);
6371 INSN_ADDRESSES_NEW (insn
, -1);
6372 remove_insn (pool
->pool_insn
);
6374 insn
= get_last_insn ();
6375 pool
->pool_insn
= emit_insn_after (gen_pool (const0_rtx
), insn
);
6376 INSN_ADDRESSES_NEW (pool
->pool_insn
, -1);
6378 s390_dump_pool (pool
, 0);
6381 /* On S/390, if the total size of the function's code plus literal pool
6382 does not exceed 4096 bytes, we use BASR to set up a function base
6383 pointer, and emit the literal pool at the end of the function. */
6384 else if (INSN_ADDRESSES (INSN_UID (pool
->emit_pool_after
))
6385 + pool
->size
+ 8 /* alignment slop */ < 4096)
6387 insn
= gen_main_base_31_small (base_reg
, pool
->label
);
6388 insn
= emit_insn_after (insn
, pool
->pool_insn
);
6389 INSN_ADDRESSES_NEW (insn
, -1);
6390 remove_insn (pool
->pool_insn
);
6392 insn
= emit_label_after (pool
->label
, insn
);
6393 INSN_ADDRESSES_NEW (insn
, -1);
6395 /* emit_pool_after will be set by s390_mainpool_start to the
6396 last insn of the section where the literal pool should be
6398 insn
= pool
->emit_pool_after
;
6400 pool
->pool_insn
= emit_insn_after (gen_pool (const0_rtx
), insn
);
6401 INSN_ADDRESSES_NEW (pool
->pool_insn
, -1);
6403 s390_dump_pool (pool
, 1);
6406 /* Otherwise, we emit an inline literal pool and use BASR to branch
6407 over it, setting up the pool register at the same time. */
6410 rtx pool_end
= gen_label_rtx ();
6412 insn
= gen_main_base_31_large (base_reg
, pool
->label
, pool_end
);
6413 insn
= emit_insn_after (insn
, pool
->pool_insn
);
6414 INSN_ADDRESSES_NEW (insn
, -1);
6415 remove_insn (pool
->pool_insn
);
6417 insn
= emit_label_after (pool
->label
, insn
);
6418 INSN_ADDRESSES_NEW (insn
, -1);
6420 pool
->pool_insn
= emit_insn_after (gen_pool (const0_rtx
), insn
);
6421 INSN_ADDRESSES_NEW (pool
->pool_insn
, -1);
6423 insn
= emit_label_after (pool_end
, pool
->pool_insn
);
6424 INSN_ADDRESSES_NEW (insn
, -1);
6426 s390_dump_pool (pool
, 1);
6430 /* Replace all literal pool references. */
6432 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6435 replace_ltrel_base (&PATTERN (insn
));
6437 if (GET_CODE (insn
) == INSN
|| GET_CODE (insn
) == CALL_INSN
)
6439 rtx addr
, pool_ref
= NULL_RTX
;
6440 find_constant_pool_ref (PATTERN (insn
), &pool_ref
);
6443 if (s390_execute_label (insn
))
6444 addr
= s390_find_execute (pool
, insn
);
6446 addr
= s390_find_constant (pool
, get_pool_constant (pool_ref
),
6447 get_pool_mode (pool_ref
));
6449 replace_constant_pool_ref (&PATTERN (insn
), pool_ref
, addr
);
6450 INSN_CODE (insn
) = -1;
6456 /* Free the pool. */
6457 s390_free_pool (pool
);
6460 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6461 We have decided we cannot use this pool, so revert all changes
6462 to the current function that were done by s390_mainpool_start. */
6464 s390_mainpool_cancel (struct constant_pool
*pool
)
6466 /* We didn't actually change the instruction stream, so simply
6467 free the pool memory. */
6468 s390_free_pool (pool
);
6472 /* Chunkify the literal pool. */
6474 #define S390_POOL_CHUNK_MIN 0xc00
6475 #define S390_POOL_CHUNK_MAX 0xe00
6477 static struct constant_pool
*
6478 s390_chunkify_start (void)
6480 struct constant_pool
*curr_pool
= NULL
, *pool_list
= NULL
;
6483 rtx pending_ltrel
= NULL_RTX
;
6486 rtx (*gen_reload_base
) (rtx
, rtx
) =
6487 TARGET_CPU_ZARCH
? gen_reload_base_64
: gen_reload_base_31
;
6490 /* We need correct insn addresses. */
6492 shorten_branches (get_insns ());
6494 /* Scan all insns and move literals to pool chunks. */
6496 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6498 bool section_switch_p
= false;
6500 /* Check for pending LTREL_BASE. */
6503 rtx ltrel_base
= find_ltrel_base (PATTERN (insn
));
6506 gcc_assert (ltrel_base
== pending_ltrel
);
6507 pending_ltrel
= NULL_RTX
;
6511 if (!TARGET_CPU_ZARCH
&& s390_execute_label (insn
))
6514 curr_pool
= s390_start_pool (&pool_list
, insn
);
6516 s390_add_execute (curr_pool
, insn
);
6517 s390_add_pool_insn (curr_pool
, insn
);
6519 else if (GET_CODE (insn
) == INSN
|| GET_CODE (insn
) == CALL_INSN
)
6521 rtx pool_ref
= NULL_RTX
;
6522 find_constant_pool_ref (PATTERN (insn
), &pool_ref
);
6525 rtx constant
= get_pool_constant (pool_ref
);
6526 enum machine_mode mode
= get_pool_mode (pool_ref
);
6529 curr_pool
= s390_start_pool (&pool_list
, insn
);
6531 s390_add_constant (curr_pool
, constant
, mode
);
6532 s390_add_pool_insn (curr_pool
, insn
);
6534 /* Don't split the pool chunk between a LTREL_OFFSET load
6535 and the corresponding LTREL_BASE. */
6536 if (GET_CODE (constant
) == CONST
6537 && GET_CODE (XEXP (constant
, 0)) == UNSPEC
6538 && XINT (XEXP (constant
, 0), 1) == UNSPEC_LTREL_OFFSET
)
6540 gcc_assert (!pending_ltrel
);
6541 pending_ltrel
= pool_ref
;
6546 if (GET_CODE (insn
) == JUMP_INSN
|| GET_CODE (insn
) == CODE_LABEL
)
6549 s390_add_pool_insn (curr_pool
, insn
);
6550 /* An LTREL_BASE must follow within the same basic block. */
6551 gcc_assert (!pending_ltrel
);
6554 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
6555 section_switch_p
= true;
6558 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn
)
6559 || INSN_ADDRESSES (INSN_UID (insn
)) == -1)
6562 if (TARGET_CPU_ZARCH
)
6564 if (curr_pool
->size
< S390_POOL_CHUNK_MAX
)
6567 s390_end_pool (curr_pool
, NULL_RTX
);
6572 int chunk_size
= INSN_ADDRESSES (INSN_UID (insn
))
6573 - INSN_ADDRESSES (INSN_UID (curr_pool
->first_insn
))
6576 /* We will later have to insert base register reload insns.
6577 Those will have an effect on code size, which we need to
6578 consider here. This calculation makes rather pessimistic
6579 worst-case assumptions. */
6580 if (GET_CODE (insn
) == CODE_LABEL
)
6583 if (chunk_size
< S390_POOL_CHUNK_MIN
6584 && curr_pool
->size
< S390_POOL_CHUNK_MIN
6585 && !section_switch_p
)
6588 /* Pool chunks can only be inserted after BARRIERs ... */
6589 if (GET_CODE (insn
) == BARRIER
)
6591 s390_end_pool (curr_pool
, insn
);
6596 /* ... so if we don't find one in time, create one. */
6597 else if (chunk_size
> S390_POOL_CHUNK_MAX
6598 || curr_pool
->size
> S390_POOL_CHUNK_MAX
6599 || section_switch_p
)
6601 rtx label
, jump
, barrier
;
6603 if (!section_switch_p
)
6605 /* We can insert the barrier only after a 'real' insn. */
6606 if (GET_CODE (insn
) != INSN
&& GET_CODE (insn
) != CALL_INSN
)
6608 if (get_attr_length (insn
) == 0)
6610 /* Don't separate LTREL_BASE from the corresponding
6611 LTREL_OFFSET load. */
6617 gcc_assert (!pending_ltrel
);
6619 /* The old pool has to end before the section switch
6620 note in order to make it part of the current
6622 insn
= PREV_INSN (insn
);
6625 label
= gen_label_rtx ();
6626 jump
= emit_jump_insn_after (gen_jump (label
), insn
);
6627 barrier
= emit_barrier_after (jump
);
6628 insn
= emit_label_after (label
, barrier
);
6629 JUMP_LABEL (jump
) = label
;
6630 LABEL_NUSES (label
) = 1;
6632 INSN_ADDRESSES_NEW (jump
, -1);
6633 INSN_ADDRESSES_NEW (barrier
, -1);
6634 INSN_ADDRESSES_NEW (insn
, -1);
6636 s390_end_pool (curr_pool
, barrier
);
6644 s390_end_pool (curr_pool
, NULL_RTX
);
6645 gcc_assert (!pending_ltrel
);
6647 /* Find all labels that are branched into
6648 from an insn belonging to a different chunk. */
6650 far_labels
= BITMAP_ALLOC (NULL
);
6652 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6654 /* Labels marked with LABEL_PRESERVE_P can be target
6655 of non-local jumps, so we have to mark them.
6656 The same holds for named labels.
6658 Don't do that, however, if it is the label before
6661 if (GET_CODE (insn
) == CODE_LABEL
6662 && (LABEL_PRESERVE_P (insn
) || LABEL_NAME (insn
)))
6664 rtx vec_insn
= next_real_insn (insn
);
6665 rtx vec_pat
= vec_insn
&& GET_CODE (vec_insn
) == JUMP_INSN
?
6666 PATTERN (vec_insn
) : NULL_RTX
;
6668 || !(GET_CODE (vec_pat
) == ADDR_VEC
6669 || GET_CODE (vec_pat
) == ADDR_DIFF_VEC
))
6670 bitmap_set_bit (far_labels
, CODE_LABEL_NUMBER (insn
));
6673 /* If we have a direct jump (conditional or unconditional)
6674 or a casesi jump, check all potential targets. */
6675 else if (GET_CODE (insn
) == JUMP_INSN
)
6677 rtx pat
= PATTERN (insn
);
6678 if (GET_CODE (pat
) == PARALLEL
&& XVECLEN (pat
, 0) > 2)
6679 pat
= XVECEXP (pat
, 0, 0);
6681 if (GET_CODE (pat
) == SET
)
6683 rtx label
= JUMP_LABEL (insn
);
6686 if (s390_find_pool (pool_list
, label
)
6687 != s390_find_pool (pool_list
, insn
))
6688 bitmap_set_bit (far_labels
, CODE_LABEL_NUMBER (label
));
6691 else if (GET_CODE (pat
) == PARALLEL
6692 && XVECLEN (pat
, 0) == 2
6693 && GET_CODE (XVECEXP (pat
, 0, 0)) == SET
6694 && GET_CODE (XVECEXP (pat
, 0, 1)) == USE
6695 && GET_CODE (XEXP (XVECEXP (pat
, 0, 1), 0)) == LABEL_REF
)
6697 /* Find the jump table used by this casesi jump. */
6698 rtx vec_label
= XEXP (XEXP (XVECEXP (pat
, 0, 1), 0), 0);
6699 rtx vec_insn
= next_real_insn (vec_label
);
6700 rtx vec_pat
= vec_insn
&& GET_CODE (vec_insn
) == JUMP_INSN
?
6701 PATTERN (vec_insn
) : NULL_RTX
;
6703 && (GET_CODE (vec_pat
) == ADDR_VEC
6704 || GET_CODE (vec_pat
) == ADDR_DIFF_VEC
))
6706 int i
, diff_p
= GET_CODE (vec_pat
) == ADDR_DIFF_VEC
;
6708 for (i
= 0; i
< XVECLEN (vec_pat
, diff_p
); i
++)
6710 rtx label
= XEXP (XVECEXP (vec_pat
, diff_p
, i
), 0);
6712 if (s390_find_pool (pool_list
, label
)
6713 != s390_find_pool (pool_list
, insn
))
6714 bitmap_set_bit (far_labels
, CODE_LABEL_NUMBER (label
));
6721 /* Insert base register reload insns before every pool. */
6723 for (curr_pool
= pool_list
; curr_pool
; curr_pool
= curr_pool
->next
)
6725 rtx new_insn
= gen_reload_base (cfun
->machine
->base_reg
,
6727 rtx insn
= curr_pool
->first_insn
;
6728 INSN_ADDRESSES_NEW (emit_insn_before (new_insn
, insn
), -1);
6731 /* Insert base register reload insns at every far label. */
6733 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6734 if (GET_CODE (insn
) == CODE_LABEL
6735 && bitmap_bit_p (far_labels
, CODE_LABEL_NUMBER (insn
)))
6737 struct constant_pool
*pool
= s390_find_pool (pool_list
, insn
);
6740 rtx new_insn
= gen_reload_base (cfun
->machine
->base_reg
,
6742 INSN_ADDRESSES_NEW (emit_insn_after (new_insn
, insn
), -1);
6747 BITMAP_FREE (far_labels
);
6750 /* Recompute insn addresses. */
6752 init_insn_lengths ();
6753 shorten_branches (get_insns ());
6758 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6759 After we have decided to use this list, finish implementing
6760 all changes to the current function as required. */
6763 s390_chunkify_finish (struct constant_pool
*pool_list
)
6765 struct constant_pool
*curr_pool
= NULL
;
6769 /* Replace all literal pool references. */
6771 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
6774 replace_ltrel_base (&PATTERN (insn
));
6776 curr_pool
= s390_find_pool (pool_list
, insn
);
6780 if (GET_CODE (insn
) == INSN
|| GET_CODE (insn
) == CALL_INSN
)
6782 rtx addr
, pool_ref
= NULL_RTX
;
6783 find_constant_pool_ref (PATTERN (insn
), &pool_ref
);
6786 if (s390_execute_label (insn
))
6787 addr
= s390_find_execute (curr_pool
, insn
);
6789 addr
= s390_find_constant (curr_pool
,
6790 get_pool_constant (pool_ref
),
6791 get_pool_mode (pool_ref
));
6793 replace_constant_pool_ref (&PATTERN (insn
), pool_ref
, addr
);
6794 INSN_CODE (insn
) = -1;
6799 /* Dump out all literal pools. */
6801 for (curr_pool
= pool_list
; curr_pool
; curr_pool
= curr_pool
->next
)
6802 s390_dump_pool (curr_pool
, 0);
6804 /* Free pool list. */
6808 struct constant_pool
*next
= pool_list
->next
;
6809 s390_free_pool (pool_list
);
6814 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6815 We have decided we cannot use this list, so revert all changes
6816 to the current function that were done by s390_chunkify_start. */
6819 s390_chunkify_cancel (struct constant_pool
*pool_list
)
6821 struct constant_pool
*curr_pool
= NULL
;
6824 /* Remove all pool placeholder insns. */
6826 for (curr_pool
= pool_list
; curr_pool
; curr_pool
= curr_pool
->next
)
6828 /* Did we insert an extra barrier? Remove it. */
6829 rtx barrier
= PREV_INSN (curr_pool
->pool_insn
);
6830 rtx jump
= barrier
? PREV_INSN (barrier
) : NULL_RTX
;
6831 rtx label
= NEXT_INSN (curr_pool
->pool_insn
);
6833 if (jump
&& GET_CODE (jump
) == JUMP_INSN
6834 && barrier
&& GET_CODE (barrier
) == BARRIER
6835 && label
&& GET_CODE (label
) == CODE_LABEL
6836 && GET_CODE (PATTERN (jump
)) == SET
6837 && SET_DEST (PATTERN (jump
)) == pc_rtx
6838 && GET_CODE (SET_SRC (PATTERN (jump
))) == LABEL_REF
6839 && XEXP (SET_SRC (PATTERN (jump
)), 0) == label
)
6842 remove_insn (barrier
);
6843 remove_insn (label
);
6846 remove_insn (curr_pool
->pool_insn
);
6849 /* Remove all base register reload insns. */
6851 for (insn
= get_insns (); insn
; )
6853 rtx next_insn
= NEXT_INSN (insn
);
6855 if (GET_CODE (insn
) == INSN
6856 && GET_CODE (PATTERN (insn
)) == SET
6857 && GET_CODE (SET_SRC (PATTERN (insn
))) == UNSPEC
6858 && XINT (SET_SRC (PATTERN (insn
)), 1) == UNSPEC_RELOAD_BASE
)
6864 /* Free pool list. */
6868 struct constant_pool
*next
= pool_list
->next
;
6869 s390_free_pool (pool_list
);
6874 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6877 s390_output_pool_entry (rtx exp
, enum machine_mode mode
, unsigned int align
)
6881 switch (GET_MODE_CLASS (mode
))
6884 case MODE_DECIMAL_FLOAT
:
6885 gcc_assert (GET_CODE (exp
) == CONST_DOUBLE
);
6887 REAL_VALUE_FROM_CONST_DOUBLE (r
, exp
);
6888 assemble_real (r
, mode
, align
);
6892 assemble_integer (exp
, GET_MODE_SIZE (mode
), align
, 1);
6893 mark_symbol_refs_as_used (exp
);
/* Return an RTL expression representing the value of the return address
   for the frame COUNT steps up from the current frame.  FRAME is the
   frame pointer of that frame.  */

rtx
s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
{
  int offset;
  rtx addr;

  /* Without backchain, we fail for all but the current frame.  */

  if (!TARGET_BACKCHAIN && count > 0)
    return NULL_RTX;

  /* For the current frame, we need to make sure the initial
     value of RETURN_REGNUM is actually saved.  */

  if (count == 0)
    {
      /* On non-z architectures branch splitting could overwrite r14.  */
      if (TARGET_CPU_ZARCH)
	return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
      else
	{
	  cfun_frame_layout.save_return_addr_p = true;
	  return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
	}
    }

  if (TARGET_PACKED_STACK)
    offset = -2 * UNITS_PER_LONG;
  else
    offset = RETURN_REGNUM * UNITS_PER_LONG;

  addr = plus_constant (frame, offset);
  addr = memory_address (Pmode, addr);
  return gen_rtx_MEM (Pmode, addr);
}
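/* Informal illustration only (not part of the port itself): this hook is
   what backs user-level calls such as

     void *caller_pc = __builtin_return_address (0);

   For count == 0 the saved initial value of RETURN_REGNUM (r14) is used;
   for outer frames the address is read from the register save area of the
   frame located via FRAME, which is why counts greater than zero only work
   when a back chain is maintained (-mbackchain).  */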
/* Return an RTL expression representing the back chain stored in
   the current stack frame.  */

rtx
s390_back_chain_rtx (void)
{
  rtx chain;

  gcc_assert (TARGET_BACKCHAIN);

  if (TARGET_PACKED_STACK)
    chain = plus_constant (stack_pointer_rtx,
			   STACK_POINTER_OFFSET - UNITS_PER_LONG);
  else
    chain = stack_pointer_rtx;

  chain = gen_rtx_MEM (Pmode, chain);
  return chain;
}
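/* Illustrative sketch only, assuming -mbackchain and the standard
   (non-packed) stack layout: target code can walk the frames roughly as

     void **frame = (void **) __builtin_frame_address (0);
     while (*frame)
       frame = (void **) *frame;    -- follow the back chain word

   The MEM returned above designates exactly that back chain word of the
   current frame.  */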
/* Find first call clobbered register unused in a function.
   This could be used as base register in a leaf function
   or for holding the return address before epilogue.  */

static int
find_unused_clobbered_reg (void)
{
  int i;
  for (i = 0; i < 6; i++)
    if (!df_regs_ever_live_p (i))
      return i;
  return 0;
}
/* Helper function for s390_regs_ever_clobbered.  Sets the fields in DATA
   for all clobbered hard regs in SETREG.  */

static void
s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED,
			void *data)
{
  int *regs_ever_clobbered = (int *)data;
  unsigned int i, regno;
  enum machine_mode mode = GET_MODE (setreg);

  if (GET_CODE (setreg) == SUBREG)
    {
      rtx inner = SUBREG_REG (setreg);
      if (!GENERAL_REG_P (inner))
	return;
      regno = subreg_regno (setreg);
    }
  else if (GENERAL_REG_P (setreg))
    regno = REGNO (setreg);
  else
    return;

  for (i = regno;
       i < regno + HARD_REGNO_NREGS (regno, mode);
       i++)
    regs_ever_clobbered[i] = 1;
}
/* Walks through all basic blocks of the current function looking
   for clobbered hard regs using s390_reg_clobbered_rtx.  The fields
   of the passed integer array REGS_EVER_CLOBBERED are set to one for
   each of those regs.  */

static void
s390_regs_ever_clobbered (int *regs_ever_clobbered)
{
  basic_block cur_bb;
  rtx cur_insn;
  unsigned int i;

  memset (regs_ever_clobbered, 0, 16 * sizeof (int));

  /* For non-leaf functions we have to consider all call clobbered regs to be
     clobbered.  */
  if (!current_function_is_leaf)
    {
      for (i = 0; i < 16; i++)
	regs_ever_clobbered[i] = call_really_used_regs[i];
    }

  /* Make the "magic" eh_return registers live if necessary.  For regs_ever_live
     this work is done by liveness analysis (mark_regs_live_at_end).
     Special care is needed for functions containing landing pads.  Landing pads
     may use the eh registers, but the code which sets these registers is not
     contained in that function.  Hence s390_regs_ever_clobbered is not able to
     deal with this automatically.  */
  if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
      if (crtl->calls_eh_return
	  || (cfun->machine->has_landing_pad_p
	      && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
	regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;

  /* For nonlocal gotos all call-saved registers have to be saved.
     This flag is also set for the unwinding code in libgcc.
     See expand_builtin_unwind_init.  For regs_ever_live this is done by
     reload.  */
  if (cfun->has_nonlocal_label)
    for (i = 0; i < 16; i++)
      if (!call_really_used_regs[i])
	regs_ever_clobbered[i] = 1;

  FOR_EACH_BB (cur_bb)
    {
      FOR_BB_INSNS (cur_bb, cur_insn)
	{
	  if (INSN_P (cur_insn))
	    note_stores (PATTERN (cur_insn),
			 s390_reg_clobbered_rtx,
			 regs_ever_clobbered);
	}
    }
}
/* Determine the frame area which actually has to be accessed
   in the function epilogue.  The values are stored at the
   given pointers AREA_BOTTOM (address of the lowest used stack
   address) and AREA_TOP (address of the first item which does
   not belong to the stack frame).  */

static void
s390_frame_area (int *area_bottom, int *area_top)
{
  int b, t;
  int i;

  b = INT_MAX;
  t = INT_MIN;

  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      b = (cfun_frame_layout.gprs_offset
	   + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
      t = b + (cfun_frame_layout.last_restore_gpr
	       - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
    }

  if (TARGET_64BIT && cfun_save_high_fprs_p)
    {
      b = MIN (b, cfun_frame_layout.f8_offset);
      t = MAX (t, (cfun_frame_layout.f8_offset
		   + cfun_frame_layout.high_fprs * 8));
    }

  if (!TARGET_64BIT)
    for (i = 2; i < 4; i++)
      if (cfun_fpr_bit_p (i))
	{
	  b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
	  t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
	}

  *area_bottom = b;
  *area_top = t;
}
/* Fill cfun->machine with info about register usage of current function.
   Return in CLOBBERED_REGS which GPRs are currently considered set.  */
7107 s390_register_info (int clobbered_regs
[])
7111 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7112 cfun_frame_layout
.fpr_bitmap
= 0;
7113 cfun_frame_layout
.high_fprs
= 0;
7115 for (i
= 24; i
< 32; i
++)
7116 if (df_regs_ever_live_p (i
) && !global_regs
[i
])
7118 cfun_set_fpr_bit (i
- 16);
7119 cfun_frame_layout
.high_fprs
++;
  /* Find first and last gpr to be saved.  We trust regs_ever_live
     data, except that we don't save and restore global registers.

     Also, all registers with special meaning to the compiler need
     to be handled extra.  */
7128 s390_regs_ever_clobbered (clobbered_regs
);
7130 for (i
= 0; i
< 16; i
++)
7131 clobbered_regs
[i
] = clobbered_regs
[i
] && !global_regs
[i
] && !fixed_regs
[i
];
7133 if (frame_pointer_needed
)
7134 clobbered_regs
[HARD_FRAME_POINTER_REGNUM
] = 1;
7137 clobbered_regs
[PIC_OFFSET_TABLE_REGNUM
]
7138 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM
);
7140 clobbered_regs
[BASE_REGNUM
]
7141 |= (cfun
->machine
->base_reg
7142 && REGNO (cfun
->machine
->base_reg
) == BASE_REGNUM
);
7144 clobbered_regs
[RETURN_REGNUM
]
7145 |= (!current_function_is_leaf
7146 || TARGET_TPF_PROFILING
7147 || cfun
->machine
->split_branches_pending_p
7148 || cfun_frame_layout
.save_return_addr_p
7149 || crtl
->calls_eh_return
7152 clobbered_regs
[STACK_POINTER_REGNUM
]
7153 |= (!current_function_is_leaf
7154 || TARGET_TPF_PROFILING
7155 || cfun_save_high_fprs_p
7156 || get_frame_size () > 0
7157 || cfun
->calls_alloca
7160 for (i
= 6; i
< 16; i
++)
7161 if (df_regs_ever_live_p (i
) || clobbered_regs
[i
])
7163 for (j
= 15; j
> i
; j
--)
7164 if (df_regs_ever_live_p (j
) || clobbered_regs
[j
])
7169 /* Nothing to save/restore. */
7170 cfun_frame_layout
.first_save_gpr_slot
= -1;
7171 cfun_frame_layout
.last_save_gpr_slot
= -1;
7172 cfun_frame_layout
.first_save_gpr
= -1;
7173 cfun_frame_layout
.first_restore_gpr
= -1;
7174 cfun_frame_layout
.last_save_gpr
= -1;
7175 cfun_frame_layout
.last_restore_gpr
= -1;
7179 /* Save slots for gprs from i to j. */
7180 cfun_frame_layout
.first_save_gpr_slot
= i
;
7181 cfun_frame_layout
.last_save_gpr_slot
= j
;
7183 for (i
= cfun_frame_layout
.first_save_gpr_slot
;
7184 i
< cfun_frame_layout
.last_save_gpr_slot
+ 1;
7186 if (clobbered_regs
[i
])
7189 for (j
= cfun_frame_layout
.last_save_gpr_slot
; j
> i
; j
--)
7190 if (clobbered_regs
[j
])
7193 if (i
== cfun_frame_layout
.last_save_gpr_slot
+ 1)
7195 /* Nothing to save/restore. */
7196 cfun_frame_layout
.first_save_gpr
= -1;
7197 cfun_frame_layout
.first_restore_gpr
= -1;
7198 cfun_frame_layout
.last_save_gpr
= -1;
7199 cfun_frame_layout
.last_restore_gpr
= -1;
7203 /* Save / Restore from gpr i to j. */
7204 cfun_frame_layout
.first_save_gpr
= i
;
7205 cfun_frame_layout
.first_restore_gpr
= i
;
7206 cfun_frame_layout
.last_save_gpr
= j
;
7207 cfun_frame_layout
.last_restore_gpr
= j
;
7213 /* Varargs functions need to save gprs 2 to 6. */
7214 if (cfun
->va_list_gpr_size
7215 && crtl
->args
.info
.gprs
< GP_ARG_NUM_REG
)
7217 int min_gpr
= crtl
->args
.info
.gprs
;
7218 int max_gpr
= min_gpr
+ cfun
->va_list_gpr_size
;
7219 if (max_gpr
> GP_ARG_NUM_REG
)
7220 max_gpr
= GP_ARG_NUM_REG
;
7222 if (cfun_frame_layout
.first_save_gpr
== -1
7223 || cfun_frame_layout
.first_save_gpr
> 2 + min_gpr
)
7225 cfun_frame_layout
.first_save_gpr
= 2 + min_gpr
;
7226 cfun_frame_layout
.first_save_gpr_slot
= 2 + min_gpr
;
7229 if (cfun_frame_layout
.last_save_gpr
== -1
7230 || cfun_frame_layout
.last_save_gpr
< 2 + max_gpr
- 1)
7232 cfun_frame_layout
.last_save_gpr
= 2 + max_gpr
- 1;
7233 cfun_frame_layout
.last_save_gpr_slot
= 2 + max_gpr
- 1;
7237 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7238 if (TARGET_HARD_FLOAT
&& cfun
->va_list_fpr_size
7239 && crtl
->args
.info
.fprs
< FP_ARG_NUM_REG
)
7241 int min_fpr
= crtl
->args
.info
.fprs
;
7242 int max_fpr
= min_fpr
+ cfun
->va_list_fpr_size
;
7243 if (max_fpr
> FP_ARG_NUM_REG
)
7244 max_fpr
= FP_ARG_NUM_REG
;
7246 /* ??? This is currently required to ensure proper location
7247 of the fpr save slots within the va_list save area. */
7248 if (TARGET_PACKED_STACK
)
7251 for (i
= min_fpr
; i
< max_fpr
; i
++)
7252 cfun_set_fpr_bit (i
);
7257 for (i
= 2; i
< 4; i
++)
7258 if (df_regs_ever_live_p (i
+ 16) && !global_regs
[i
+ 16])
7259 cfun_set_fpr_bit (i
);
/* Fill cfun->machine with info about frame of current function.  */
7265 s390_frame_info (void)
7269 cfun_frame_layout
.frame_size
= get_frame_size ();
7270 if (!TARGET_64BIT
&& cfun_frame_layout
.frame_size
> 0x7fff0000)
7271 fatal_error ("total size of local variables exceeds architecture limit");
7273 if (!TARGET_PACKED_STACK
)
7275 cfun_frame_layout
.backchain_offset
= 0;
7276 cfun_frame_layout
.f0_offset
= 16 * UNITS_PER_LONG
;
7277 cfun_frame_layout
.f4_offset
= cfun_frame_layout
.f0_offset
+ 2 * 8;
7278 cfun_frame_layout
.f8_offset
= -cfun_frame_layout
.high_fprs
* 8;
7279 cfun_frame_layout
.gprs_offset
= (cfun_frame_layout
.first_save_gpr_slot
7282 else if (TARGET_BACKCHAIN
) /* kernel stack layout */
7284 cfun_frame_layout
.backchain_offset
= (STACK_POINTER_OFFSET
7286 cfun_frame_layout
.gprs_offset
7287 = (cfun_frame_layout
.backchain_offset
7288 - (STACK_POINTER_REGNUM
- cfun_frame_layout
.first_save_gpr_slot
+ 1)
7293 cfun_frame_layout
.f4_offset
7294 = (cfun_frame_layout
.gprs_offset
7295 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7297 cfun_frame_layout
.f0_offset
7298 = (cfun_frame_layout
.f4_offset
7299 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7303 /* On 31 bit we have to care about alignment of the
7304 floating point regs to provide fastest access. */
7305 cfun_frame_layout
.f0_offset
7306 = ((cfun_frame_layout
.gprs_offset
7307 & ~(STACK_BOUNDARY
/ BITS_PER_UNIT
- 1))
7308 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7310 cfun_frame_layout
.f4_offset
7311 = (cfun_frame_layout
.f0_offset
7312 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7315 else /* no backchain */
7317 cfun_frame_layout
.f4_offset
7318 = (STACK_POINTER_OFFSET
7319 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7321 cfun_frame_layout
.f0_offset
7322 = (cfun_frame_layout
.f4_offset
7323 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7325 cfun_frame_layout
.gprs_offset
7326 = cfun_frame_layout
.f0_offset
- cfun_gprs_save_area_size
;
7329 if (current_function_is_leaf
7330 && !TARGET_TPF_PROFILING
7331 && cfun_frame_layout
.frame_size
== 0
7332 && !cfun_save_high_fprs_p
7333 && !cfun
->calls_alloca
7337 if (!TARGET_PACKED_STACK
)
7338 cfun_frame_layout
.frame_size
+= (STACK_POINTER_OFFSET
7339 + crtl
->outgoing_args_size
7340 + cfun_frame_layout
.high_fprs
* 8);
7343 if (TARGET_BACKCHAIN
)
7344 cfun_frame_layout
.frame_size
+= UNITS_PER_LONG
;
7346 /* No alignment trouble here because f8-f15 are only saved under
7348 cfun_frame_layout
.f8_offset
= (MIN (MIN (cfun_frame_layout
.f0_offset
,
7349 cfun_frame_layout
.f4_offset
),
7350 cfun_frame_layout
.gprs_offset
)
7351 - cfun_frame_layout
.high_fprs
* 8);
7353 cfun_frame_layout
.frame_size
+= cfun_frame_layout
.high_fprs
* 8;
7355 for (i
= 0; i
< 8; i
++)
7356 if (cfun_fpr_bit_p (i
))
7357 cfun_frame_layout
.frame_size
+= 8;
  cfun_frame_layout.frame_size += cfun_gprs_save_area_size;

  /* If under 31 bit an odd number of gprs has to be saved we have to adjust
     the frame size to sustain 8 byte alignment of stack frames.  */
  cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
				   STACK_BOUNDARY / BITS_PER_UNIT - 1)
				  & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
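  /* Worked example of the rounding above, with illustrative values only:
     for STACK_BOUNDARY / BITS_PER_UNIT == 8 and a raw frame_size of 92,
     (92 + 7) & ~7 yields 96, i.e. the size is rounded up to the next
     multiple of 8, so 31-bit frames stay 8-byte aligned even when an odd
     number of 4-byte GPR slots is saved.  */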
  cfun_frame_layout.frame_size += crtl->outgoing_args_size;
}
/* Generate frame layout.  Fills in register and frame data for the current
   function in cfun->machine.  This routine can be called multiple times;
   it will re-do the complete frame layout every time.  */

static void
s390_init_frame_layout (void)
{
  HOST_WIDE_INT frame_size;
  int base_used;
  int clobbered_regs[16];

  /* On S/390 machines, we may need to perform branch splitting, which
     will require both base and return address register.  We have no
     choice but to assume we're going to need them until right at the
     end of the machine dependent reorg phase.  */
  if (!TARGET_CPU_ZARCH)
    cfun->machine->split_branches_pending_p = true;

  do
    {
      frame_size = cfun_frame_layout.frame_size;

      /* Try to predict whether we'll need the base register.  */
      base_used = cfun->machine->split_branches_pending_p
		  || crtl->uses_const_pool
		  || (!DISP_IN_RANGE (frame_size)
		      && !CONST_OK_FOR_K (frame_size));

      /* Decide which register to use as literal pool base.  In small
	 leaf functions, try to use an unused call-clobbered register
	 as base register to avoid save/restore overhead.  */
      if (!base_used)
	cfun->machine->base_reg = NULL_RTX;
      else if (current_function_is_leaf && !df_regs_ever_live_p (5))
	cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
      else
	cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);

      s390_register_info (clobbered_regs);
      s390_frame_info ();
    }
  while (frame_size != cfun_frame_layout.frame_size);
}
/* Update frame layout.  Recompute actual register save data based on
   current info and update regs_ever_live for the special registers.
   May be called multiple times, but may never cause *more* registers
   to be saved than s390_init_frame_layout allocated room for.  */

static void
s390_update_frame_layout (void)
{
  int clobbered_regs[16];

  s390_register_info (clobbered_regs);

  df_set_regs_ever_live (BASE_REGNUM,
			 clobbered_regs[BASE_REGNUM] ? true : false);
  df_set_regs_ever_live (RETURN_REGNUM,
			 clobbered_regs[RETURN_REGNUM] ? true : false);
  df_set_regs_ever_live (STACK_POINTER_REGNUM,
			 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);

  if (cfun->machine->base_reg)
    df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
}
/* Return true if it is legal to put a value with MODE into REGNO.  */
7441 s390_hard_regno_mode_ok (unsigned int regno
, enum machine_mode mode
)
7443 switch (REGNO_REG_CLASS (regno
))
7446 if (REGNO_PAIR_OK (regno
, mode
))
7448 if (mode
== SImode
|| mode
== DImode
)
7451 if (FLOAT_MODE_P (mode
) && GET_MODE_CLASS (mode
) != MODE_VECTOR_FLOAT
)
7456 if (FRAME_REGNO_P (regno
) && mode
== Pmode
)
7461 if (REGNO_PAIR_OK (regno
, mode
))
7464 || (mode
!= TFmode
&& mode
!= TCmode
&& mode
!= TDmode
))
7469 if (GET_MODE_CLASS (mode
) == MODE_CC
)
7473 if (REGNO_PAIR_OK (regno
, mode
))
7475 if (mode
== SImode
|| mode
== Pmode
)
/* Return nonzero if register OLD_REG can be renamed to register NEW_REG.  */

bool
s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
{
  /* Once we've decided upon a register to use as base register, it must
     no longer be used for any other purpose.  */
  if (cfun->machine->base_reg)
    if (REGNO (cfun->machine->base_reg) == old_reg
	|| REGNO (cfun->machine->base_reg) == new_reg)
      return false;

  return true;
}
/* Maximum number of registers to represent a value of mode MODE
   in a register of class RCLASS.  */

int
s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
  switch (rclass)
    {
    case FP_REGS:
      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
      else
	return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
    case ACCESS_REGS:
      return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
    default:
      break;
    }
  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
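/* Illustrative examples only: a TFmode value (16 bytes) occupies
   (16 + 8 - 1) / 8 == 2 registers in FP_REGS, while the same 16 bytes
   need 16 / UNITS_PER_WORD general registers, i.e. 2 on 64-bit and 4 on
   31-bit; a complex DFmode value in FP_REGS gets 2 * ((16 / 2 + 7) / 8)
   == 2 registers, one per component.  */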
/* Return true if register FROM can be eliminated via register TO.  */

static bool
s390_can_eliminate (const int from, const int to)
{
  /* On zSeries machines, we have not marked the base register as fixed.
     Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
     If a function requires the base register, we say here that this
     elimination cannot be performed.  This will cause reload to free
     up the base register (as if it were fixed).  On the other hand,
     if the current function does *not* require the base register, we
     say here the elimination succeeds, which in turn allows reload
     to allocate the base register for any other purpose.  */
  if (from == BASE_REGNUM && to == BASE_REGNUM)
    {
      if (TARGET_CPU_ZARCH)
	{
	  s390_init_frame_layout ();
	  return cfun->machine->base_reg == NULL_RTX;
	}

      return false;
    }

  /* Everything else must point into the stack frame.  */
  gcc_assert (to == STACK_POINTER_REGNUM
	      || to == HARD_FRAME_POINTER_REGNUM);

  gcc_assert (from == FRAME_POINTER_REGNUM
	      || from == ARG_POINTER_REGNUM
	      || from == RETURN_ADDRESS_POINTER_REGNUM);

  /* Make sure we actually saved the return address.  */
  if (from == RETURN_ADDRESS_POINTER_REGNUM)
    if (!crtl->calls_eh_return
	&& !cfun->stdarg
	&& !cfun_frame_layout.save_return_addr_p)
      return false;

  return true;
}
/* Return offset between register FROM and TO initially after prolog.  */

HOST_WIDE_INT
s390_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;
  int index;

  /* ??? Why are we called for non-eliminable pairs?  */
  if (!s390_can_eliminate (from, to))
    return 0;

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      offset = (get_frame_size()
		+ STACK_POINTER_OFFSET
		+ crtl->outgoing_args_size);
      break;

    case ARG_POINTER_REGNUM:
      s390_init_frame_layout ();
      offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
      break;

    case RETURN_ADDRESS_POINTER_REGNUM:
      s390_init_frame_layout ();
      index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
      gcc_assert (index >= 0);
      offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
      offset += index * UNITS_PER_LONG;
      break;

    case BASE_REGNUM:
      offset = 0;
      break;

    default:
      gcc_unreachable ();
    }

  return offset;
}
/* Emit insn to save fpr REGNUM at offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
save_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));

  if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
    set_mem_alias_set (addr, get_varargs_alias_set ());
  else
    set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
}
/* Emit insn to restore fpr REGNUM from offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
restore_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
  set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
}
/* Return true if REGNO is a global register, but not one
   of the special ones that need to be saved/restored in anyway.  */

static inline bool
global_not_special_regno_p (int regno)
{
  return (global_regs[regno]
	  /* These registers are special and need to be
	     restored in any case.  */
	  && !(regno == STACK_POINTER_REGNUM
	       || regno == RETURN_REGNUM
	       || regno == BASE_REGNUM
	       || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
}
/* Generate insn to save registers FIRST to LAST into
   the register save area located at offset OFFSET
   relative to register BASE.  */
7658 save_gprs (rtx base
, int offset
, int first
, int last
)
7660 rtx addr
, insn
, note
;
7663 addr
= plus_constant (base
, offset
);
7664 addr
= gen_rtx_MEM (Pmode
, addr
);
7666 set_mem_alias_set (addr
, get_frame_alias_set ());
7668 /* Special-case single register. */
7672 insn
= gen_movdi (addr
, gen_rtx_REG (Pmode
, first
));
7674 insn
= gen_movsi (addr
, gen_rtx_REG (Pmode
, first
));
7676 if (!global_not_special_regno_p (first
))
7677 RTX_FRAME_RELATED_P (insn
) = 1;
7682 insn
= gen_store_multiple (addr
,
7683 gen_rtx_REG (Pmode
, first
),
7684 GEN_INT (last
- first
+ 1));
7686 if (first
<= 6 && cfun
->stdarg
)
7687 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
7689 rtx mem
= XEXP (XVECEXP (PATTERN (insn
), 0, i
), 0);
7692 set_mem_alias_set (mem
, get_varargs_alias_set ());
  /* We need to set the FRAME_RELATED flag on all SETs
     inside the store-multiple pattern.

     However, we must not emit DWARF records for registers 2..5
     if they are stored for use by variable arguments ...

     ??? Unfortunately, it is not enough to simply not set the
     FRAME_RELATED flags for those SETs, because the first SET
     of the PARALLEL is always treated as if it had the flag
     set, even if it does not.  Therefore we emit a new pattern
     without those registers as REG_FRAME_RELATED_EXPR note.  */
7707 if (first
>= 6 && !global_not_special_regno_p (first
))
7709 rtx pat
= PATTERN (insn
);
7711 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
7712 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
7713 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat
,
7715 RTX_FRAME_RELATED_P (XVECEXP (pat
, 0, i
)) = 1;
7717 RTX_FRAME_RELATED_P (insn
) = 1;
7723 for (start
= first
>= 6 ? first
: 6; start
<= last
; start
++)
7724 if (!global_not_special_regno_p (start
))
7730 addr
= plus_constant (base
, offset
+ (start
- first
) * UNITS_PER_LONG
);
7731 note
= gen_store_multiple (gen_rtx_MEM (Pmode
, addr
),
7732 gen_rtx_REG (Pmode
, start
),
7733 GEN_INT (last
- start
+ 1));
7734 note
= PATTERN (note
);
7736 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, note
);
7738 for (i
= 0; i
< XVECLEN (note
, 0); i
++)
7739 if (GET_CODE (XVECEXP (note
, 0, i
)) == SET
7740 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note
,
7742 RTX_FRAME_RELATED_P (XVECEXP (note
, 0, i
)) = 1;
7744 RTX_FRAME_RELATED_P (insn
) = 1;
    }

  return insn;
}

/* Generate insn to restore registers FIRST to LAST from
   the register save area located at offset OFFSET
   relative to register BASE.  */

static rtx
restore_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn;

  addr = plus_constant (base, offset);
  addr = gen_rtx_MEM (Pmode, addr);
  set_mem_alias_set (addr, get_frame_alias_set ());

  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
	insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
      else
	insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);

      return insn;
    }

  insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
			    addr,
			    GEN_INT (last - first + 1));
  return insn;
}
/* Return insn sequence to load the GOT register.  */

static GTY(()) rtx got_symbol;

rtx
s390_load_got (void)
{
  rtx insns;

  if (!got_symbol)
    {
      got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
    }

  start_sequence ();

  if (TARGET_CPU_ZARCH)
    {
      emit_move_insn (pic_offset_table_rtx, got_symbol);
    }
  else
    {
      rtx offset;

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
			       UNSPEC_LTREL_OFFSET);
      offset = gen_rtx_CONST (Pmode, offset);
      offset = force_const_mem (Pmode, offset);

      emit_move_insn (pic_offset_table_rtx, offset);

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
			       UNSPEC_LTREL_BASE);
      offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);

      emit_move_insn (pic_offset_table_rtx, offset);
    }

  insns = get_insns ();
  end_sequence ();
  return insns;
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
s390_emit_stack_tie (void)
{
  rtx mem = gen_frame_mem (BLKmode,
			   gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));

  emit_insn (gen_stack_tie (mem));
}
/* Expand the prologue into a bunch of separate insns.  */
7838 s390_emit_prologue (void)
7846 /* Complete frame layout. */
7848 s390_update_frame_layout ();
7850 /* Annotate all constant pool references to let the scheduler know
7851 they implicitly use the base register. */
7853 push_topmost_sequence ();
7855 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
7858 annotate_constant_pool_refs (&PATTERN (insn
));
7859 df_insn_rescan (insn
);
7862 pop_topmost_sequence ();
7864 /* Choose best register to use for temp use within prologue.
7865 See below for why TPF must use the register 1. */
7867 if (!has_hard_reg_initial_val (Pmode
, RETURN_REGNUM
)
7868 && !current_function_is_leaf
7869 && !TARGET_TPF_PROFILING
)
7870 temp_reg
= gen_rtx_REG (Pmode
, RETURN_REGNUM
);
7872 temp_reg
= gen_rtx_REG (Pmode
, 1);
7874 /* Save call saved gprs. */
7875 if (cfun_frame_layout
.first_save_gpr
!= -1)
7877 insn
= save_gprs (stack_pointer_rtx
,
7878 cfun_frame_layout
.gprs_offset
+
7879 UNITS_PER_LONG
* (cfun_frame_layout
.first_save_gpr
7880 - cfun_frame_layout
.first_save_gpr_slot
),
7881 cfun_frame_layout
.first_save_gpr
,
7882 cfun_frame_layout
.last_save_gpr
);
7886 /* Dummy insn to mark literal pool slot. */
7888 if (cfun
->machine
->base_reg
)
7889 emit_insn (gen_main_pool (cfun
->machine
->base_reg
));
7891 offset
= cfun_frame_layout
.f0_offset
;
7893 /* Save f0 and f2. */
7894 for (i
= 0; i
< 2; i
++)
7896 if (cfun_fpr_bit_p (i
))
7898 save_fpr (stack_pointer_rtx
, offset
, i
+ 16);
7901 else if (!TARGET_PACKED_STACK
)
7905 /* Save f4 and f6. */
7906 offset
= cfun_frame_layout
.f4_offset
;
7907 for (i
= 2; i
< 4; i
++)
7909 if (cfun_fpr_bit_p (i
))
7911 insn
= save_fpr (stack_pointer_rtx
, offset
, i
+ 16);
7914 /* If f4 and f6 are call clobbered they are saved due to stdargs and
7915 therefore are not frame related. */
7916 if (!call_really_used_regs
[i
+ 16])
7917 RTX_FRAME_RELATED_P (insn
) = 1;
7919 else if (!TARGET_PACKED_STACK
)
7923 if (TARGET_PACKED_STACK
7924 && cfun_save_high_fprs_p
7925 && cfun_frame_layout
.f8_offset
+ cfun_frame_layout
.high_fprs
* 8 > 0)
7927 offset
= (cfun_frame_layout
.f8_offset
7928 + (cfun_frame_layout
.high_fprs
- 1) * 8);
7930 for (i
= 15; i
> 7 && offset
>= 0; i
--)
7931 if (cfun_fpr_bit_p (i
))
7933 insn
= save_fpr (stack_pointer_rtx
, offset
, i
+ 16);
7935 RTX_FRAME_RELATED_P (insn
) = 1;
7938 if (offset
>= cfun_frame_layout
.f8_offset
)
7942 if (!TARGET_PACKED_STACK
)
7943 next_fpr
= cfun_save_high_fprs_p
? 31 : 0;
7945 /* Decrement stack pointer. */
7947 if (cfun_frame_layout
.frame_size
> 0)
7949 rtx frame_off
= GEN_INT (-cfun_frame_layout
.frame_size
);
7952 if (s390_stack_size
)
7954 HOST_WIDE_INT stack_guard
;
7956 if (s390_stack_guard
)
7957 stack_guard
= s390_stack_guard
;
	  else
	    {
	      /* If no value for stack guard is provided the smallest power of 2
		 larger than the current frame size is chosen.  */
	      stack_guard = 1;
	      while (stack_guard < cfun_frame_layout.frame_size)
		stack_guard <<= 1;
	    }
7967 if (cfun_frame_layout
.frame_size
>= s390_stack_size
)
7969 warning (0, "frame size of function %qs is "
7970 HOST_WIDE_INT_PRINT_DEC
7971 " bytes exceeding user provided stack limit of "
7972 HOST_WIDE_INT_PRINT_DEC
" bytes. "
7973 "An unconditional trap is added.",
7974 current_function_name(), cfun_frame_layout
.frame_size
,
7976 emit_insn (gen_trap ());
	      /* stack_guard has to be smaller than s390_stack_size.
		 Otherwise we would emit an AND with zero which would
		 not match the test under mask pattern.  */
7983 if (stack_guard
>= s390_stack_size
)
7985 warning (0, "frame size of function %qs is "
7986 HOST_WIDE_INT_PRINT_DEC
7987 " bytes which is more than half the stack size. "
7988 "The dynamic check would not be reliable. "
7989 "No check emitted for this function.",
7990 current_function_name(),
7991 cfun_frame_layout
.frame_size
);
		  HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
						    & ~(stack_guard - 1));
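		  /* Worked example with illustrative values only: for
		     s390_stack_size == 0x10000 and stack_guard == 0x1000
		     the mask is 0xffff & ~0xfff == 0xf000.  ANDing the
		     stack pointer with it yields zero exactly when the
		     stack pointer has dropped into the lowest stack_guard
		     bytes of the stack area, which is the condition the
		     conditional trap emitted below checks for.  */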
7998 rtx t
= gen_rtx_AND (Pmode
, stack_pointer_rtx
,
7999 GEN_INT (stack_check_mask
));
8001 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode
,
8003 t
, const0_rtx
, const0_rtx
));
8005 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode
,
8007 t
, const0_rtx
, const0_rtx
));
8012 if (s390_warn_framesize
> 0
8013 && cfun_frame_layout
.frame_size
>= s390_warn_framesize
)
8014 warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC
" bytes",
8015 current_function_name (), cfun_frame_layout
.frame_size
);
8017 if (s390_warn_dynamicstack_p
&& cfun
->calls_alloca
)
8018 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8020 /* Save incoming stack pointer into temp reg. */
8021 if (TARGET_BACKCHAIN
|| next_fpr
)
8022 insn
= emit_insn (gen_move_insn (temp_reg
, stack_pointer_rtx
));
8024 /* Subtract frame size from stack pointer. */
8026 if (DISP_IN_RANGE (INTVAL (frame_off
)))
8028 insn
= gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
8029 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
8031 insn
= emit_insn (insn
);
8035 if (!CONST_OK_FOR_K (INTVAL (frame_off
)))
8036 frame_off
= force_const_mem (Pmode
, frame_off
);
8038 insn
= emit_insn (gen_add2_insn (stack_pointer_rtx
, frame_off
));
8039 annotate_constant_pool_refs (&PATTERN (insn
));
8042 RTX_FRAME_RELATED_P (insn
) = 1;
8043 real_frame_off
= GEN_INT (-cfun_frame_layout
.frame_size
);
8044 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
8045 gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
8046 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
8049 /* Set backchain. */
8051 if (TARGET_BACKCHAIN
)
8053 if (cfun_frame_layout
.backchain_offset
)
8054 addr
= gen_rtx_MEM (Pmode
,
8055 plus_constant (stack_pointer_rtx
,
8056 cfun_frame_layout
.backchain_offset
));
8058 addr
= gen_rtx_MEM (Pmode
, stack_pointer_rtx
);
8059 set_mem_alias_set (addr
, get_frame_alias_set ());
8060 insn
= emit_insn (gen_move_insn (addr
, temp_reg
));
8063 /* If we support non-call exceptions (e.g. for Java),
8064 we need to make sure the backchain pointer is set up
8065 before any possibly trapping memory access. */
8066 if (TARGET_BACKCHAIN
&& cfun
->can_throw_non_call_exceptions
)
8068 addr
= gen_rtx_MEM (BLKmode
, gen_rtx_SCRATCH (VOIDmode
));
8069 emit_clobber (addr
);
8073 /* Save fprs 8 - 15 (64 bit ABI). */
8075 if (cfun_save_high_fprs_p
&& next_fpr
)
8077 /* If the stack might be accessed through a different register
8078 we have to make sure that the stack pointer decrement is not
8079 moved below the use of the stack slots. */
8080 s390_emit_stack_tie ();
8082 insn
= emit_insn (gen_add2_insn (temp_reg
,
8083 GEN_INT (cfun_frame_layout
.f8_offset
)));
8087 for (i
= 24; i
<= next_fpr
; i
++)
8088 if (cfun_fpr_bit_p (i
- 16))
8090 rtx addr
= plus_constant (stack_pointer_rtx
,
8091 cfun_frame_layout
.frame_size
8092 + cfun_frame_layout
.f8_offset
8095 insn
= save_fpr (temp_reg
, offset
, i
);
8097 RTX_FRAME_RELATED_P (insn
) = 1;
8098 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
8099 gen_rtx_SET (VOIDmode
,
8100 gen_rtx_MEM (DFmode
, addr
),
8101 gen_rtx_REG (DFmode
, i
)));
8105 /* Set frame pointer, if needed. */
8107 if (frame_pointer_needed
)
8109 insn
= emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
);
8110 RTX_FRAME_RELATED_P (insn
) = 1;
8113 /* Set up got pointer, if needed. */
8115 if (flag_pic
&& df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM
))
8117 rtx insns
= s390_load_got ();
8119 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
8120 annotate_constant_pool_refs (&PATTERN (insn
));
8125 if (TARGET_TPF_PROFILING
)
8127 /* Generate a BAS instruction to serve as a function
8128 entry intercept to facilitate the use of tracing
8129 algorithms located at the branch target. */
8130 emit_insn (gen_prologue_tpf ());
8132 /* Emit a blockage here so that all code
8133 lies between the profiling mechanisms. */
8134 emit_insn (gen_blockage ());
/* Expand the epilogue into a bunch of separate insns.  */
8141 s390_emit_epilogue (bool sibcall
)
8143 rtx frame_pointer
, return_reg
, cfa_restores
= NULL_RTX
;
8144 int area_bottom
, area_top
, offset
= 0;
8149 if (TARGET_TPF_PROFILING
)
8152 /* Generate a BAS instruction to serve as a function
8153 entry intercept to facilitate the use of tracing
8154 algorithms located at the branch target. */
8156 /* Emit a blockage here so that all code
8157 lies between the profiling mechanisms. */
8158 emit_insn (gen_blockage ());
8160 emit_insn (gen_epilogue_tpf ());
8163 /* Check whether to use frame or stack pointer for restore. */
8165 frame_pointer
= (frame_pointer_needed
8166 ? hard_frame_pointer_rtx
: stack_pointer_rtx
);
8168 s390_frame_area (&area_bottom
, &area_top
);
8170 /* Check whether we can access the register save area.
8171 If not, increment the frame pointer as required. */
8173 if (area_top
<= area_bottom
)
8175 /* Nothing to restore. */
8177 else if (DISP_IN_RANGE (cfun_frame_layout
.frame_size
+ area_bottom
)
8178 && DISP_IN_RANGE (cfun_frame_layout
.frame_size
+ area_top
- 1))
8180 /* Area is in range. */
8181 offset
= cfun_frame_layout
.frame_size
;
8185 rtx insn
, frame_off
, cfa
;
8187 offset
= area_bottom
< 0 ? -area_bottom
: 0;
8188 frame_off
= GEN_INT (cfun_frame_layout
.frame_size
- offset
);
8190 cfa
= gen_rtx_SET (VOIDmode
, frame_pointer
,
8191 gen_rtx_PLUS (Pmode
, frame_pointer
, frame_off
));
8192 if (DISP_IN_RANGE (INTVAL (frame_off
)))
8194 insn
= gen_rtx_SET (VOIDmode
, frame_pointer
,
8195 gen_rtx_PLUS (Pmode
, frame_pointer
, frame_off
));
8196 insn
= emit_insn (insn
);
8200 if (!CONST_OK_FOR_K (INTVAL (frame_off
)))
8201 frame_off
= force_const_mem (Pmode
, frame_off
);
8203 insn
= emit_insn (gen_add2_insn (frame_pointer
, frame_off
));
8204 annotate_constant_pool_refs (&PATTERN (insn
));
8206 add_reg_note (insn
, REG_CFA_ADJUST_CFA
, cfa
);
8207 RTX_FRAME_RELATED_P (insn
) = 1;
8210 /* Restore call saved fprs. */
8214 if (cfun_save_high_fprs_p
)
8216 next_offset
= cfun_frame_layout
.f8_offset
;
8217 for (i
= 24; i
< 32; i
++)
8219 if (cfun_fpr_bit_p (i
- 16))
8221 restore_fpr (frame_pointer
,
8222 offset
+ next_offset
, i
);
8224 = alloc_reg_note (REG_CFA_RESTORE
,
8225 gen_rtx_REG (DFmode
, i
), cfa_restores
);
8234 next_offset
= cfun_frame_layout
.f4_offset
;
8235 for (i
= 18; i
< 20; i
++)
8237 if (cfun_fpr_bit_p (i
- 16))
8239 restore_fpr (frame_pointer
,
8240 offset
+ next_offset
, i
);
8242 = alloc_reg_note (REG_CFA_RESTORE
,
8243 gen_rtx_REG (DFmode
, i
), cfa_restores
);
8246 else if (!TARGET_PACKED_STACK
)
8252 /* Return register. */
8254 return_reg
= gen_rtx_REG (Pmode
, RETURN_REGNUM
);
8256 /* Restore call saved gprs. */
8258 if (cfun_frame_layout
.first_restore_gpr
!= -1)
8263 /* Check for global register and save them
8264 to stack location from where they get restored. */
8266 for (i
= cfun_frame_layout
.first_restore_gpr
;
8267 i
<= cfun_frame_layout
.last_restore_gpr
;
8270 if (global_not_special_regno_p (i
))
8272 addr
= plus_constant (frame_pointer
,
8273 offset
+ cfun_frame_layout
.gprs_offset
8274 + (i
- cfun_frame_layout
.first_save_gpr_slot
)
8276 addr
= gen_rtx_MEM (Pmode
, addr
);
8277 set_mem_alias_set (addr
, get_frame_alias_set ());
8278 emit_move_insn (addr
, gen_rtx_REG (Pmode
, i
));
8282 = alloc_reg_note (REG_CFA_RESTORE
,
8283 gen_rtx_REG (Pmode
, i
), cfa_restores
);
8288 /* Fetch return address from stack before load multiple,
8289 this will do good for scheduling. */
8291 if (cfun_frame_layout
.save_return_addr_p
8292 || (cfun_frame_layout
.first_restore_gpr
< BASE_REGNUM
8293 && cfun_frame_layout
.last_restore_gpr
> RETURN_REGNUM
))
8295 int return_regnum
= find_unused_clobbered_reg();
8298 return_reg
= gen_rtx_REG (Pmode
, return_regnum
);
8300 addr
= plus_constant (frame_pointer
,
8301 offset
+ cfun_frame_layout
.gprs_offset
8303 - cfun_frame_layout
.first_save_gpr_slot
)
8305 addr
= gen_rtx_MEM (Pmode
, addr
);
8306 set_mem_alias_set (addr
, get_frame_alias_set ());
8307 emit_move_insn (return_reg
, addr
);
8311 insn
= restore_gprs (frame_pointer
,
8312 offset
+ cfun_frame_layout
.gprs_offset
8313 + (cfun_frame_layout
.first_restore_gpr
8314 - cfun_frame_layout
.first_save_gpr_slot
)
8316 cfun_frame_layout
.first_restore_gpr
,
8317 cfun_frame_layout
.last_restore_gpr
);
8318 insn
= emit_insn (insn
);
8319 REG_NOTES (insn
) = cfa_restores
;
8320 add_reg_note (insn
, REG_CFA_DEF_CFA
,
8321 plus_constant (stack_pointer_rtx
, STACK_POINTER_OFFSET
));
8322 RTX_FRAME_RELATED_P (insn
) = 1;
8328 /* Return to caller. */
8330 p
= rtvec_alloc (2);
8332 RTVEC_ELT (p
, 0) = gen_rtx_RETURN (VOIDmode
);
8333 RTVEC_ELT (p
, 1) = gen_rtx_USE (VOIDmode
, return_reg
);
8334 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
/* Return the size in bytes of a function argument of
   type TYPE and/or mode MODE.  At least one of TYPE or
   MODE must be specified.  */

static int
s390_function_arg_size (enum machine_mode mode, const_tree type)
{
  if (type)
    return int_size_in_bytes (type);

  /* No type info available for some library calls ...  */
  if (mode != BLKmode)
    return GET_MODE_SIZE (mode);

  /* If we have neither type nor mode, abort.  */
  gcc_unreachable ();
}
/* Return true if a function argument of type TYPE and mode MODE
   is to be passed in a floating-point register, if available.  */

static bool
s390_function_arg_float (enum machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* Soft-float changes the ABI: no floating-point registers are used.  */
  if (TARGET_SOFT_FLOAT)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;

  /* The ABI says that record types with a single member are treated
     just like that member would be.  */
  while (TREE_CODE (type) == RECORD_TYPE)
    {
      tree field, single = NULL_TREE;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	{
	  if (TREE_CODE (field) != FIELD_DECL)
	    continue;

	  if (single == NULL_TREE)
	    single = TREE_TYPE (field);
	  else
	    return false;
	}

      if (single == NULL_TREE)
	return false;
      else
	type = single;
    }

  return TREE_CODE (type) == REAL_TYPE;
}
/* Return true if a function argument of type TYPE and mode MODE
   is to be passed in an integer register, or a pair of integer
   registers, if available.  */

static bool
s390_function_arg_integer (enum machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return GET_MODE_CLASS (mode) == MODE_INT
	   || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));

  /* We accept small integral (and similar) types.  */
  if (INTEGRAL_TYPE_P (type)
      || POINTER_TYPE_P (type)
      || TREE_CODE (type) == OFFSET_TYPE
      || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
    return true;

  /* We also accept structs of size 1, 2, 4, 8 that are not
     passed in floating-point registers.  */
  if (AGGREGATE_TYPE_P (type)
      && exact_log2 (size) >= 0
      && !s390_function_arg_float (mode, type))
    return true;

  return false;
}
/* Return 1 if a function argument of type TYPE and mode MODE
   is to be passed by reference.  The ABI specifies that only
   structures of size 1, 2, 4, or 8 bytes are passed by value,
   all other structures (and complex numbers) are passed by
   reference.  */

static bool
s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
			enum machine_mode mode, const_tree type,
			bool named ATTRIBUTE_UNUSED)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return true;

  if (type)
    {
      if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
	return 1;

      if (TREE_CODE (type) == COMPLEX_TYPE
	  || TREE_CODE (type) == VECTOR_TYPE)
	return 1;
    }

  return 0;
}
/* Update the data in CUM to advance over an argument of mode MODE and
   data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)  The boolean NAMED specifies whether the
   argument is a named argument (as opposed to an unnamed argument
   matching an ellipsis).  */

static void
s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  if (s390_function_arg_float (mode, type))
    {
      cum->fprs += 1;
    }
  else if (s390_function_arg_integer (mode, type))
    {
      int size = s390_function_arg_size (mode, type);
      cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
    }
  else
    gcc_unreachable ();
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On S/390, we use general purpose registers 2 through 6 to
   pass integer, pointer, and certain structure arguments, and
   floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
   to pass floating point arguments.  All remaining arguments
   are pushed to the stack.  */
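/* Informal example of the convention just described (illustration only,
   not used by the compiler): for a call such as

     void f (int a, double b, long c, struct { char x[8]; } d);

   a is passed in gpr 2, b in fpr 0, c in gpr 3, and the 8-byte struct d
   in gpr 4 (as a gpr 4/5 pair on 31-bit); arguments that no longer fit
   into r2-r6 or the floating point argument registers go to the stack.  */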
static rtx
s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  if (s390_function_arg_float (mode, type))
    {
      if (cum->fprs + 1 > FP_ARG_NUM_REG)
	return 0;
      else
	return gen_rtx_REG (mode, cum->fprs + 16);
    }
  else if (s390_function_arg_integer (mode, type))
    {
      int size = s390_function_arg_size (mode, type);
      int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;

      if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
	return 0;
      else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
	return gen_rtx_REG (mode, cum->gprs + 2);
      else if (n_gprs == 2)
	{
	  rtvec p = rtvec_alloc (2);

	  RTVEC_ELT (p, 0)
	    = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
				 const0_rtx);
	  RTVEC_ELT (p, 1)
	    = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
				 GEN_INT (4));

	  return gen_rtx_PARALLEL (mode, p);
	}
    }

  /* After the real arguments, expand_call calls us once again
     with a void_type_node type.  Whatever we return here is
     passed as operand 2 to the call expanders.

     We don't need this feature ...  */
  else if (type == void_type_node)
    return const0_rtx;

  gcc_unreachable ();
}
/* Return true if return values of type TYPE should be returned
   in a memory buffer whose address is passed by the caller as
   hidden first argument.  */

static bool
s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
{
  /* We accept small integral (and similar) types.  */
  if (INTEGRAL_TYPE_P (type)
      || POINTER_TYPE_P (type)
      || TREE_CODE (type) == OFFSET_TYPE
      || TREE_CODE (type) == REAL_TYPE)
    return int_size_in_bytes (type) > 8;

  /* Aggregates and similar constructs are always returned
     in memory.  */
  if (AGGREGATE_TYPE_P (type)
      || TREE_CODE (type) == COMPLEX_TYPE
      || TREE_CODE (type) == VECTOR_TYPE)
    return true;

  /* ??? We get called on all sorts of random stuff from
     aggregate_value_p.  We can't abort, but it's not clear
     what's safe to return.  Pretend it's a struct I guess.  */
  return true;
}
/* Function arguments and return values are promoted to word size.  */

static enum machine_mode
s390_promote_function_mode (const_tree type, enum machine_mode mode,
			    int *punsignedp,
			    const_tree fntype ATTRIBUTE_UNUSED,
			    int for_return ATTRIBUTE_UNUSED)
{
  if (INTEGRAL_MODE_P (mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
    {
      if (POINTER_TYPE_P (type))
	*punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  return mode;
}
/* Define where to return a (scalar) value of type TYPE.
   If TYPE is null, define where to return a (scalar)
   value of mode MODE from a libcall.  */

rtx
s390_function_value (const_tree type, const_tree fn, enum machine_mode mode)
{
  if (type)
    {
      int unsignedp = TYPE_UNSIGNED (type);
      mode = promote_function_mode (type, TYPE_MODE (type), &unsignedp, fn, 1);
    }

  gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE_SIZE (mode) <= 8);

  if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
    return gen_rtx_REG (mode, 16);
  else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
	   || UNITS_PER_LONG == UNITS_PER_WORD)
    return gen_rtx_REG (mode, 2);
  else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
    {
      rtvec p = rtvec_alloc (2);

      RTVEC_ELT (p, 0)
	= gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
      RTVEC_ELT (p, 1)
	= gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));

      return gen_rtx_PARALLEL (mode, p);
    }

  gcc_unreachable ();
}
/* Create and return the va_list datatype.

   On S/390, va_list is an array type equivalent to

      typedef struct __va_list_tag
        {
            long __gpr;
            long __fpr;
            void *__overflow_arg_area;
            void *__reg_save_area;
        } va_list[1];

   where __gpr and __fpr hold the number of general purpose
   or floating point arguments used up to now, respectively,
   __overflow_arg_area points to the stack location of the
   next argument passed on the stack, and __reg_save_area
   always points to the start of the register area in the
   call frame of the current function.  The function prologue
   saves all registers used for argument passing into this
   area if the function uses variable arguments.  */
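/* For illustration only (this is target program code, not code used by the
   compiler): a varargs callee consumes the structure documented above in
   the usual way, e.g.

     #include <stdarg.h>

     long
     sum (int n, ...)
     {
       va_list ap;
       long s = 0;
       va_start (ap, n);          -- filled in by s390_va_start below
       while (n-- > 0)
         s += va_arg (ap, long);  -- expanded by s390_gimplify_va_arg
       va_end (ap);
       return s;
     }

   Arguments beyond the register argument area are fetched via
   __overflow_arg_area, the rest via __reg_save_area.  */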
static tree
s390_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;

  record = lang_hooks.types.make_type (RECORD_TYPE);

  type_decl =
    build_decl (BUILTINS_LOCATION,
		TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__gpr"),
		      long_integer_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__fpr"),
		      long_integer_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("__reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
/* Implement va_start by filling the va_list structure VALIST.
   STDARG_P is always true, and ignored.
   NEXTARG points to the first anonymous stack argument.

   The following global variables are used to initialize
   the va_list structure:

     crtl->args.info:
       holds number of gprs and fprs used for named arguments.
     crtl->args.arg_offset_rtx:
       holds the offset of the first anonymous stack argument
       (relative to the virtual arg pointer).  */
8713 s390_va_start (tree valist
, rtx nextarg ATTRIBUTE_UNUSED
)
8715 HOST_WIDE_INT n_gpr
, n_fpr
;
8717 tree f_gpr
, f_fpr
, f_ovf
, f_sav
;
8718 tree gpr
, fpr
, ovf
, sav
, t
;
8720 f_gpr
= TYPE_FIELDS (TREE_TYPE (va_list_type_node
));
8721 f_fpr
= DECL_CHAIN (f_gpr
);
8722 f_ovf
= DECL_CHAIN (f_fpr
);
8723 f_sav
= DECL_CHAIN (f_ovf
);
8725 valist
= build_va_arg_indirect_ref (valist
);
8726 gpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_gpr
), valist
, f_gpr
, NULL_TREE
);
8727 fpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_fpr
), valist
, f_fpr
, NULL_TREE
);
8728 ovf
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovf
), valist
, f_ovf
, NULL_TREE
);
8729 sav
= build3 (COMPONENT_REF
, TREE_TYPE (f_sav
), valist
, f_sav
, NULL_TREE
);
8731 /* Count number of gp and fp argument registers used. */
8733 n_gpr
= crtl
->args
.info
.gprs
;
8734 n_fpr
= crtl
->args
.info
.fprs
;
8736 if (cfun
->va_list_gpr_size
)
8738 t
= build2 (MODIFY_EXPR
, TREE_TYPE (gpr
), gpr
,
8739 build_int_cst (NULL_TREE
, n_gpr
));
8740 TREE_SIDE_EFFECTS (t
) = 1;
8741 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
8744 if (cfun
->va_list_fpr_size
)
8746 t
= build2 (MODIFY_EXPR
, TREE_TYPE (fpr
), fpr
,
8747 build_int_cst (NULL_TREE
, n_fpr
));
8748 TREE_SIDE_EFFECTS (t
) = 1;
8749 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
8752 /* Find the overflow area. */
8753 if (n_gpr
+ cfun
->va_list_gpr_size
> GP_ARG_NUM_REG
8754 || n_fpr
+ cfun
->va_list_fpr_size
> FP_ARG_NUM_REG
)
8756 t
= make_tree (TREE_TYPE (ovf
), virtual_incoming_args_rtx
);
8758 off
= INTVAL (crtl
->args
.arg_offset_rtx
);
8759 off
= off
< 0 ? 0 : off
;
8760 if (TARGET_DEBUG_ARG
)
8761 fprintf (stderr
, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8762 (int)n_gpr
, (int)n_fpr
, off
);
8764 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (ovf
), t
, size_int (off
));
8766 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ovf
), ovf
, t
);
8767 TREE_SIDE_EFFECTS (t
) = 1;
8768 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
8771 /* Find the register save area. */
8772 if ((cfun
->va_list_gpr_size
&& n_gpr
< GP_ARG_NUM_REG
)
8773 || (cfun
->va_list_fpr_size
&& n_fpr
< FP_ARG_NUM_REG
))
8775 t
= make_tree (TREE_TYPE (sav
), return_address_pointer_rtx
);
8776 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (sav
), t
,
8777 size_int (-RETURN_REGNUM
* UNITS_PER_LONG
));
8779 t
= build2 (MODIFY_EXPR
, TREE_TYPE (sav
), sav
, t
);
8780 TREE_SIDE_EFFECTS (t
) = 1;
8781 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
/* Implement va_arg by updating the va_list structure
   VALIST as required to retrieve an argument of type
   TYPE, and returning that argument.

   Generates code equivalent to:

   if (integral value) {
     if (size  <= 4 && args.gpr < 5 ||
         size  > 4 && args.gpr < 4 )
       ret = args.reg_save_area[args.gpr+8]
     else
       ret = *args.overflow_arg_area++;
   } else if (float value) {
     if (args.fpr < 2)
       ret = args.reg_save_area[args.fpr+64]
     else
       ret = *args.overflow_arg_area++;
   } else if (aggregate value) {
     if (args.gpr < 5)
       ret = *args.reg_save_area[args.gpr]
     else
       ret = **args.overflow_arg_area++;
   } */

static tree
8810 s390_gimplify_va_arg (tree valist
, tree type
, gimple_seq
*pre_p
,
8811 gimple_seq
*post_p ATTRIBUTE_UNUSED
)
8813 tree f_gpr
, f_fpr
, f_ovf
, f_sav
;
8814 tree gpr
, fpr
, ovf
, sav
, reg
, t
, u
;
8815 int indirect_p
, size
, n_reg
, sav_ofs
, sav_scale
, max_reg
;
8816 tree lab_false
, lab_over
, addr
;
8818 f_gpr
= TYPE_FIELDS (TREE_TYPE (va_list_type_node
));
8819 f_fpr
= DECL_CHAIN (f_gpr
);
8820 f_ovf
= DECL_CHAIN (f_fpr
);
8821 f_sav
= DECL_CHAIN (f_ovf
);
8823 valist
= build_va_arg_indirect_ref (valist
);
8824 gpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_gpr
), valist
, f_gpr
, NULL_TREE
);
8825 fpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_fpr
), valist
, f_fpr
, NULL_TREE
);
8826 sav
= build3 (COMPONENT_REF
, TREE_TYPE (f_sav
), valist
, f_sav
, NULL_TREE
);
8828 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8829 both appear on a lhs. */
8830 valist
= unshare_expr (valist
);
8831 ovf
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovf
), valist
, f_ovf
, NULL_TREE
);
8833 size
= int_size_in_bytes (type
);
8835 if (pass_by_reference (NULL
, TYPE_MODE (type
), type
, false))
8837 if (TARGET_DEBUG_ARG
)
8839 fprintf (stderr
, "va_arg: aggregate type");
8843 /* Aggregates are passed by reference. */
8848 /* kernel stack layout on 31 bit: It is assumed here that no padding
8849 will be added by s390_frame_info because for va_args always an even
8850 number of gprs has to be saved r15-r2 = 14 regs. */
8851 sav_ofs
= 2 * UNITS_PER_LONG
;
8852 sav_scale
= UNITS_PER_LONG
;
8853 size
= UNITS_PER_LONG
;
8854 max_reg
= GP_ARG_NUM_REG
- n_reg
;
8856 else if (s390_function_arg_float (TYPE_MODE (type
), type
))
8858 if (TARGET_DEBUG_ARG
)
8860 fprintf (stderr
, "va_arg: float type");
8864 /* FP args go in FP registers, if present. */
8868 sav_ofs
= 16 * UNITS_PER_LONG
;
8870 max_reg
= FP_ARG_NUM_REG
- n_reg
;
8874 if (TARGET_DEBUG_ARG
)
8876 fprintf (stderr
, "va_arg: other type");
8880 /* Otherwise into GP registers. */
8883 n_reg
= (size
+ UNITS_PER_LONG
- 1) / UNITS_PER_LONG
;
8885 /* kernel stack layout on 31 bit: It is assumed here that no padding
8886 will be added by s390_frame_info because for va_args always an even
8887 number of gprs has to be saved r15-r2 = 14 regs. */
8888 sav_ofs
= 2 * UNITS_PER_LONG
;
8890 if (size
< UNITS_PER_LONG
)
8891 sav_ofs
+= UNITS_PER_LONG
- size
;
8893 sav_scale
= UNITS_PER_LONG
;
8894 max_reg
= GP_ARG_NUM_REG
- n_reg
;
8897 /* Pull the value out of the saved registers ... */
8899 lab_false
= create_artificial_label (UNKNOWN_LOCATION
);
8900 lab_over
= create_artificial_label (UNKNOWN_LOCATION
);
8901 addr
= create_tmp_var (ptr_type_node
, "addr");
8903 t
= fold_convert (TREE_TYPE (reg
), size_int (max_reg
));
8904 t
= build2 (GT_EXPR
, boolean_type_node
, reg
, t
);
8905 u
= build1 (GOTO_EXPR
, void_type_node
, lab_false
);
8906 t
= build3 (COND_EXPR
, void_type_node
, t
, u
, NULL_TREE
);
8907 gimplify_and_add (t
, pre_p
);
8909 t
= build2 (POINTER_PLUS_EXPR
, ptr_type_node
, sav
,
8910 size_int (sav_ofs
));
8911 u
= build2 (MULT_EXPR
, TREE_TYPE (reg
), reg
,
8912 fold_convert (TREE_TYPE (reg
), size_int (sav_scale
)));
8913 t
= build2 (POINTER_PLUS_EXPR
, ptr_type_node
, t
, fold_convert (sizetype
, u
));
8915 gimplify_assign (addr
, t
, pre_p
);
8917 gimple_seq_add_stmt (pre_p
, gimple_build_goto (lab_over
));
8919 gimple_seq_add_stmt (pre_p
, gimple_build_label (lab_false
));
8922 /* ... Otherwise out of the overflow area. */
8925 if (size
< UNITS_PER_LONG
)
8926 t
= build2 (POINTER_PLUS_EXPR
, ptr_type_node
, t
,
8927 size_int (UNITS_PER_LONG
- size
));
8929 gimplify_expr (&t
, pre_p
, NULL
, is_gimple_val
, fb_rvalue
);
8931 gimplify_assign (addr
, t
, pre_p
);
8933 t
= build2 (POINTER_PLUS_EXPR
, ptr_type_node
, t
,
8935 gimplify_assign (ovf
, t
, pre_p
);
8937 gimple_seq_add_stmt (pre_p
, gimple_build_label (lab_over
));
8940 /* Increment register save count. */
8942 u
= build2 (PREINCREMENT_EXPR
, TREE_TYPE (reg
), reg
,
8943 fold_convert (TREE_TYPE (reg
), size_int (n_reg
)));
8944 gimplify_and_add (u
, pre_p
);
8948 t
= build_pointer_type_for_mode (build_pointer_type (type
),
8950 addr
= fold_convert (t
, addr
);
8951 addr
= build_va_arg_indirect_ref (addr
);
8955 t
= build_pointer_type_for_mode (type
, ptr_mode
, true);
8956 addr
= fold_convert (t
, addr
);
8959 return build_va_arg_indirect_ref (addr
);
8967 S390_BUILTIN_THREAD_POINTER
,
8968 S390_BUILTIN_SET_THREAD_POINTER
,
8973 static enum insn_code
const code_for_builtin_64
[S390_BUILTIN_max
] = {
8978 static enum insn_code
const code_for_builtin_31
[S390_BUILTIN_max
] = {
8984 s390_init_builtins (void)
8988 ftype
= build_function_type (ptr_type_node
, void_list_node
);
8989 add_builtin_function ("__builtin_thread_pointer", ftype
,
8990 S390_BUILTIN_THREAD_POINTER
, BUILT_IN_MD
,
8993 ftype
= build_function_type_list (void_type_node
, ptr_type_node
, NULL_TREE
);
8994 add_builtin_function ("__builtin_set_thread_pointer", ftype
,
8995 S390_BUILTIN_SET_THREAD_POINTER
, BUILT_IN_MD
,
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */
9006 s390_expand_builtin (tree exp
, rtx target
, rtx subtarget ATTRIBUTE_UNUSED
,
9007 enum machine_mode mode ATTRIBUTE_UNUSED
,
9008 int ignore ATTRIBUTE_UNUSED
)
9012 enum insn_code
const *code_for_builtin
=
9013 TARGET_64BIT
? code_for_builtin_64
: code_for_builtin_31
;
9015 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
9016 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
9017 enum insn_code icode
;
9018 rtx op
[MAX_ARGS
], pat
;
9022 call_expr_arg_iterator iter
;
9024 if (fcode
>= S390_BUILTIN_max
)
9025 internal_error ("bad builtin fcode");
9026 icode
= code_for_builtin
[fcode
];
9028 internal_error ("bad builtin fcode");
9030 nonvoid
= TREE_TYPE (TREE_TYPE (fndecl
)) != void_type_node
;
9033 FOR_EACH_CALL_EXPR_ARG (arg
, iter
, exp
)
9035 const struct insn_operand_data
*insn_op
;
9037 if (arg
== error_mark_node
)
9039 if (arity
> MAX_ARGS
)
9042 insn_op
= &insn_data
[icode
].operand
[arity
+ nonvoid
];
9044 op
[arity
] = expand_expr (arg
, NULL_RTX
, insn_op
->mode
, EXPAND_NORMAL
);
9046 if (!(*insn_op
->predicate
) (op
[arity
], insn_op
->mode
))
9047 op
[arity
] = copy_to_mode_reg (insn_op
->mode
, op
[arity
]);
9053 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
9055 || GET_MODE (target
) != tmode
9056 || !(*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
9057 target
= gen_reg_rtx (tmode
);
9063 pat
= GEN_FCN (icode
) (target
);
9067 pat
= GEN_FCN (icode
) (target
, op
[0]);
9069 pat
= GEN_FCN (icode
) (op
[0]);
9072 pat
= GEN_FCN (icode
) (target
, op
[0], op
[1]);
/* Output assembly code for the trampoline template to
   stdio stream FILE.

   On S/390, we use gpr 1 internally in the trampoline code;
   gpr 0 is used to hold the static chain.  */

static void
s390_asm_trampoline_template (FILE *file)
{
  rtx op[2];
  op[0] = gen_rtx_REG (Pmode, 0);
  op[1] = gen_rtx_REG (Pmode, 1);

  if (TARGET_64BIT)
    {
      output_asm_insn ("basr\t%1,0", op);
      output_asm_insn ("lmg\t%0,%1,14(%1)", op);
      output_asm_insn ("br\t%1", op);
      ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
    }
  else
    {
      output_asm_insn ("basr\t%1,0", op);
      output_asm_insn ("lm\t%0,%1,6(%1)", op);
      output_asm_insn ("br\t%1", op);
      ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
    }
}
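/* Rough picture of the resulting trampoline (illustration only, derived
   from the template above and from s390_trampoline_init below): the first
   2*UNITS_PER_WORD bytes hold the basr/lmg (or lm)/br code sequence padded
   with the ASM_OUTPUT_SKIP above; the static chain value and the target
   function address are then stored at offsets 2*UNITS_PER_WORD and
   3*UNITS_PER_WORD, from where the lmg/lm instruction loads them into
   gpr 0 and gpr 1 before the br jumps to the target.  */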
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  emit_block_move (m_tramp, assemble_trampoline_template (),
                   GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);

  mem = adjust_address (m_tramp, Pmode, 2*UNITS_PER_WORD);
  emit_move_insn (mem, cxt);
  mem = adjust_address (m_tramp, Pmode, 3*UNITS_PER_WORD);
  emit_move_insn (mem, fnaddr);
}
/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */

void
s390_function_profiler (FILE *file, int labelno)
{
  rtx op[7];
  char label[128];

  ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);

  fprintf (file, "# function profiler \n");

  op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
  op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));

  op[2] = gen_rtx_REG (Pmode, 1);
  op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
  SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;

  op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
  if (flag_pic)
    {
      op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
      op[4] = gen_rtx_CONST (Pmode, op[4]);
    }

  if (TARGET_64BIT)
    {
      output_asm_insn ("stg\t%0,%1", op);
      output_asm_insn ("larl\t%2,%3", op);
      output_asm_insn ("brasl\t%0,%4", op);
      output_asm_insn ("lg\t%0,%1", op);
    }
  else if (!flag_pic)
    {
      op[6] = gen_label_rtx ();

      output_asm_insn ("st\t%0,%1", op);
      output_asm_insn ("bras\t%2,%l6", op);
      output_asm_insn (".long\t%4", op);
      output_asm_insn (".long\t%3", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
      output_asm_insn ("l\t%0,0(%2)", op);
      output_asm_insn ("l\t%2,4(%2)", op);
      output_asm_insn ("basr\t%0,%0", op);
      output_asm_insn ("l\t%0,%1", op);
    }
  else
    {
      op[5] = gen_label_rtx ();
      op[6] = gen_label_rtx ();

      output_asm_insn ("st\t%0,%1", op);
      output_asm_insn ("bras\t%2,%l6", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
      output_asm_insn (".long\t%4-%l5", op);
      output_asm_insn (".long\t%3-%l5", op);
      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
      output_asm_insn ("lr\t%0,%2", op);
      output_asm_insn ("a\t%0,0(%2)", op);
      output_asm_insn ("a\t%2,4(%2)", op);
      output_asm_insn ("basr\t%0,%0", op);
      output_asm_insn ("l\t%0,%1", op);
    }
}
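/* For illustration only (derived from the TARGET_64BIT branch above), the
   sequence emitted for a 64-bit profiled function entry is roughly

     stg    %r14,8(%r15)    # save the return address register
     larl   %r1,.LPn        # address of the profiler label
     brasl  %r14,_mcount    # call the profiling routine
     lg     %r14,8(%r15)    # restore the return address register

   with _mcount replaced by its PLT form when -fpic/-fPIC is in effect.  */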
/* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
   into its SYMBOL_REF_FLAGS.  */

static void
s390_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == VAR_DECL)
    {
      /* If a variable has a forced alignment to < 2 bytes, mark it
         with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
         operand.  */
      if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
        SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
      if (!DECL_SIZE (decl)
          || !DECL_ALIGN (decl)
          || !host_integerp (DECL_SIZE (decl), 0)
          || (DECL_ALIGN (decl) <= 64
              && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
        SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
    }

  /* Literal pool references don't have a decl so they are handled
     differently here.  We rely on the information in the MEM_ALIGN
     entry to decide upon natural alignment.  */
  if (MEM_P (rtl)
      && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
      && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
      && (MEM_ALIGN (rtl) == 0
          || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
          || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
}
/* Output thunk to FILE that implements a C++ virtual function call (with
   multiple inheritance) to FUNCTION.  The thunk adjusts the this pointer
   by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
   stored at VCALL_OFFSET in the vtable whose address is located at offset 0
   relative to the resulting this pointer.  */

static void
s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx op[10];
  int nonlocal = 0;

  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), file, 1);

  /* Operand 0 is the target function.  */
  op[0] = XEXP (DECL_RTL (function), 0);
  if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
    {
      nonlocal = 1;
      op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
                              TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
      op[0] = gen_rtx_CONST (Pmode, op[0]);
    }

  /* Operand 1 is the 'this' pointer.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    op[1] = gen_rtx_REG (Pmode, 3);
  else
    op[1] = gen_rtx_REG (Pmode, 2);

  /* Operand 2 is the delta.  */
  op[2] = GEN_INT (delta);

  /* Operand 3 is the vcall_offset.  */
  op[3] = GEN_INT (vcall_offset);

  /* Operand 4 is the temporary register.  */
  op[4] = gen_rtx_REG (Pmode, 1);

  /* Operands 5 to 8 can be used as labels.  */
  op[5] = NULL_RTX;
  op[6] = NULL_RTX;
  op[7] = NULL_RTX;
  op[8] = NULL_RTX;

  /* Operand 9 can be used for temporary register.  */
  op[9] = NULL_RTX;

  /* Generate code.  */
  if (TARGET_64BIT)
    {
      /* Setup literal pool pointer if required.  */
      if ((!DISP_IN_RANGE (delta)
           && !CONST_OK_FOR_K (delta)
           && !CONST_OK_FOR_Os (delta))
          || (!DISP_IN_RANGE (vcall_offset)
              && !CONST_OK_FOR_K (vcall_offset)
              && !CONST_OK_FOR_Os (vcall_offset)))
        {
          op[5] = gen_label_rtx ();
          output_asm_insn ("larl\t%4,%5", op);
        }

      /* Add DELTA to this pointer.  */
      if (delta)
        {
          if (CONST_OK_FOR_J (delta))
            output_asm_insn ("la\t%1,%2(%1)", op);
          else if (DISP_IN_RANGE (delta))
            output_asm_insn ("lay\t%1,%2(%1)", op);
          else if (CONST_OK_FOR_K (delta))
            output_asm_insn ("aghi\t%1,%2", op);
          else if (CONST_OK_FOR_Os (delta))
            output_asm_insn ("agfi\t%1,%2", op);
          else
            {
              op[6] = gen_label_rtx ();
              output_asm_insn ("agf\t%1,%6-%5(%4)", op);
            }
        }

      /* Perform vcall adjustment.  */
      if (vcall_offset)
        {
          if (DISP_IN_RANGE (vcall_offset))
            {
              output_asm_insn ("lg\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,%3(%4)", op);
            }
          else if (CONST_OK_FOR_K (vcall_offset))
            {
              output_asm_insn ("lghi\t%4,%3", op);
              output_asm_insn ("ag\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,0(%4)", op);
            }
          else if (CONST_OK_FOR_Os (vcall_offset))
            {
              output_asm_insn ("lgfi\t%4,%3", op);
              output_asm_insn ("ag\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,0(%4)", op);
            }
          else
            {
              op[7] = gen_label_rtx ();
              output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
              output_asm_insn ("ag\t%4,0(%1)", op);
              output_asm_insn ("ag\t%1,0(%4)", op);
            }
        }

      /* Jump to target.  */
      output_asm_insn ("jg\t%0", op);

      /* Output literal pool if required.  */
      if (op[5])
        {
          output_asm_insn (".align\t4", op);
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[5]));
        }
      if (op[6])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[6]));
          output_asm_insn (".long\t%2", op);
        }
      if (op[7])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[7]));
          output_asm_insn (".long\t%3", op);
        }
    }
  else
    {
      /* Setup base pointer if required.  */
      if (!vcall_offset
          || (!DISP_IN_RANGE (delta)
              && !CONST_OK_FOR_K (delta)
              && !CONST_OK_FOR_Os (delta))
          || (!DISP_IN_RANGE (delta)
              && !CONST_OK_FOR_K (vcall_offset)
              && !CONST_OK_FOR_Os (vcall_offset)))
        {
          op[5] = gen_label_rtx ();
          output_asm_insn ("basr\t%4,0", op);
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[5]));
        }

      /* Add DELTA to this pointer.  */
      if (delta)
        {
          if (CONST_OK_FOR_J (delta))
            output_asm_insn ("la\t%1,%2(%1)", op);
          else if (DISP_IN_RANGE (delta))
            output_asm_insn ("lay\t%1,%2(%1)", op);
          else if (CONST_OK_FOR_K (delta))
            output_asm_insn ("ahi\t%1,%2", op);
          else if (CONST_OK_FOR_Os (delta))
            output_asm_insn ("afi\t%1,%2", op);
          else
            {
              op[6] = gen_label_rtx ();
              output_asm_insn ("a\t%1,%6-%5(%4)", op);
            }
        }

      /* Perform vcall adjustment.  */
      if (vcall_offset)
        {
          if (CONST_OK_FOR_J (vcall_offset))
            {
              output_asm_insn ("l\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,%3(%4)", op);
            }
          else if (DISP_IN_RANGE (vcall_offset))
            {
              output_asm_insn ("l\t%4,0(%1)", op);
              output_asm_insn ("ay\t%1,%3(%4)", op);
            }
          else if (CONST_OK_FOR_K (vcall_offset))
            {
              output_asm_insn ("lhi\t%4,%3", op);
              output_asm_insn ("a\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,0(%4)", op);
            }
          else if (CONST_OK_FOR_Os (vcall_offset))
            {
              output_asm_insn ("iilf\t%4,%3", op);
              output_asm_insn ("a\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,0(%4)", op);
            }
          else
            {
              op[7] = gen_label_rtx ();
              output_asm_insn ("l\t%4,%7-%5(%4)", op);
              output_asm_insn ("a\t%4,0(%1)", op);
              output_asm_insn ("a\t%1,0(%4)", op);
            }

          /* We had to clobber the base pointer register.
             Re-setup the base pointer (with a different base).  */
          op[5] = gen_label_rtx ();
          output_asm_insn ("basr\t%4,0", op);
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[5]));
        }

      /* Jump to target.  */
      op[8] = gen_label_rtx ();

      if (!flag_pic)
        output_asm_insn ("l\t%4,%8-%5(%4)", op);
      else if (!nonlocal)
        output_asm_insn ("a\t%4,%8-%5(%4)", op);
      /* We cannot call through .plt, since .plt requires %r12 loaded.  */
      else if (flag_pic == 1)
        {
          output_asm_insn ("a\t%4,%8-%5(%4)", op);
          output_asm_insn ("l\t%4,%0(%4)", op);
        }
      else if (flag_pic == 2)
        {
          op[9] = gen_rtx_REG (Pmode, 0);
          output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
          output_asm_insn ("a\t%4,%8-%5(%4)", op);
          output_asm_insn ("ar\t%4,%9", op);
          output_asm_insn ("l\t%4,0(%4)", op);
        }

      output_asm_insn ("br\t%4", op);

      /* Output literal pool.  */
      output_asm_insn (".align\t4", op);

      if (nonlocal && flag_pic == 2)
        output_asm_insn (".long\t%0", op);
      if (nonlocal)
        {
          op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
          SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
        }

      targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
      if (!flag_pic)
        output_asm_insn (".long\t%0", op);
      else
        output_asm_insn (".long\t%0-%5", op);

      if (op[6])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[6]));
          output_asm_insn (".long\t%2", op);
        }
      if (op[7])
        {
          targetm.asm_out.internal_label (file, "L",
                                          CODE_LABEL_NUMBER (op[7]));
          output_asm_insn (".long\t%3", op);
        }
    }
  final_end_function ();
}
static bool
s390_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || (TARGET_64BIT && mode == DImode));
}
/* Checks whether the given CALL_EXPR would use a caller
   saved register.  This is used to decide whether sibling call
   optimization could be performed on the respective function
   call.  */

static bool
s390_call_saved_register_used (tree call_expr)
{
  CUMULATIVE_ARGS cum;
  tree parameter;
  enum machine_mode mode;
  tree type;
  rtx parm_rtx;
  int reg, i;

  INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);

  for (i = 0; i < call_expr_nargs (call_expr); i++)
    {
      parameter = CALL_EXPR_ARG (call_expr, i);
      gcc_assert (parameter);

      /* For an undeclared variable passed as parameter we will get
         an ERROR_MARK node here.  */
      if (TREE_CODE (parameter) == ERROR_MARK)
        return true;

      type = TREE_TYPE (parameter);
      gcc_assert (type);

      mode = TYPE_MODE (type);
      gcc_assert (mode);

      if (pass_by_reference (&cum, mode, type, true))
        {
          mode = Pmode;
          type = build_pointer_type (type);
        }

      parm_rtx = s390_function_arg (&cum, mode, type, 0);

      s390_function_arg_advance (&cum, mode, type, 0);

      if (!parm_rtx)
        continue;

      if (REG_P (parm_rtx))
        {
          for (reg = 0;
               reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
               reg++)
            if (!call_used_regs[reg + REGNO (parm_rtx)])
              return true;
        }

      if (GET_CODE (parm_rtx) == PARALLEL)
        {
          int i;

          for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
            {
              rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);

              gcc_assert (REG_P (r));

              for (reg = 0;
                   reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
                   reg++)
                if (!call_used_regs[reg + REGNO (r)])
                  return true;
            }
        }
    }
  return false;
}
/* Return true if the given call expression can be
   turned into a sibling call.
   DECL holds the declaration of the function to be called whereas
   EXP is the call expression itself.  */

static bool
s390_function_ok_for_sibcall (tree decl, tree exp)
{
  /* The TPF epilogue uses register 1.  */
  if (TARGET_TPF_PROFILING)
    return false;

  /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
     which would have to be restored before the sibcall.  */
  if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
    return false;

  /* Register 6 on s390 is available as an argument register but unfortunately
     "caller saved".  This makes functions needing this register for arguments
     not suitable for sibcalls.  */
  return !s390_call_saved_register_used (exp);
}
/* Return the fixed registers used for condition codes.  */

static bool
s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = CC_REGNUM;
  *p2 = INVALID_REGNUM;
  return true;
}
/* This function is used by the call expanders of the machine description.
   It emits the call insn itself together with the necessary operations
   to adjust the target address and returns the emitted insn.
   ADDR_LOCATION is the target address rtx
   TLS_CALL the location of the thread-local symbol
   RESULT_REG the register where the result of the call should be stored
   RETADDR_REG the register where the return address should be stored
               If this parameter is NULL_RTX the call is considered
               to be a sibling call.  */

rtx
s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
                rtx retaddr_reg)
{
  bool plt_call = false;
  rtx insn;
  rtx call;
  rtx clobber;
  rtvec vec;

  /* Direct function calls need special treatment.  */
  if (GET_CODE (addr_location) == SYMBOL_REF)
    {
      /* When calling a global routine in PIC mode, we must
         replace the symbol itself with the PLT stub.  */
      if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
        {
          if (retaddr_reg != NULL_RTX)
            {
              addr_location = gen_rtx_UNSPEC (Pmode,
                                              gen_rtvec (1, addr_location),
                                              UNSPEC_PLT);
              addr_location = gen_rtx_CONST (Pmode, addr_location);
              plt_call = true;
            }
          else
            /* For -fpic code the PLT entries might use r12 which is
               call-saved.  Therefore we cannot do a sibcall when
               calling directly using a symbol ref.  When reaching
               this point we decided (in s390_function_ok_for_sibcall)
               to do a sibcall for a function pointer but one of the
               optimizers was able to get rid of the function pointer
               by propagating the symbol ref into the call.  This
               optimization is illegal for S/390 so we turn the direct
               call into an indirect call again.  */
            addr_location = force_reg (Pmode, addr_location);
        }

      /* Unless we can use the bras(l) insn, force the
         routine address into a register.  */
      if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
        {
          if (flag_pic)
            addr_location = legitimize_pic_address (addr_location, 0);
          else
            addr_location = force_reg (Pmode, addr_location);
        }
    }

  /* If it is already an indirect call or the code above moved the
     SYMBOL_REF to somewhere else make sure the address can be found in
     register 1.  */
  if (retaddr_reg == NULL_RTX
      && GET_CODE (addr_location) != SYMBOL_REF
      && !plt_call)
    {
      emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
      addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
    }

  addr_location = gen_rtx_MEM (QImode, addr_location);
  call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);

  if (result_reg != NULL_RTX)
    call = gen_rtx_SET (VOIDmode, result_reg, call);

  if (retaddr_reg != NULL_RTX)
    {
      clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);

      if (tls_call != NULL_RTX)
        vec = gen_rtvec (3, call, clobber,
                         gen_rtx_USE (VOIDmode, tls_call));
      else
        vec = gen_rtvec (2, call, clobber);

      call = gen_rtx_PARALLEL (VOIDmode, vec);
    }

  insn = emit_call_insn (call);

  /* 31-bit PLT stubs and tls calls use the GOT register implicitly.  */
  if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
    {
      /* s390_function_ok_for_sibcall should
         have denied sibcalls in this case.  */
      gcc_assert (retaddr_reg != NULL_RTX);
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
    }
  return insn;
}
/* Implement CONDITIONAL_REGISTER_USAGE.  */

void
s390_conditional_register_usage (void)
{
  int i;

  if (flag_pic)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  if (TARGET_CPU_ZARCH)
    {
      fixed_regs[BASE_REGNUM] = 0;
      call_used_regs[BASE_REGNUM] = 0;
      fixed_regs[RETURN_REGNUM] = 0;
      call_used_regs[RETURN_REGNUM] = 0;
    }
  if (TARGET_64BIT)
    {
      for (i = 24; i < 32; i++)
        call_used_regs[i] = call_really_used_regs[i] = 0;
    }
  else
    {
      for (i = 18; i < 20; i++)
        call_used_regs[i] = call_really_used_regs[i] = 0;
    }

  if (TARGET_SOFT_FLOAT)
    {
      for (i = 16; i < 32; i++)
        call_used_regs[i] = fixed_regs[i] = 1;
    }
}
/* Corresponding function to eh_return expander.  */
static GTY(()) rtx s390_tpf_eh_return_symbol;

void
s390_emit_tpf_eh_return (rtx target)
{
  rtx insn, reg;

  if (!s390_tpf_eh_return_symbol)
    s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");

  reg = gen_rtx_REG (Pmode, 2);

  emit_move_insn (reg, target);
  insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
                         gen_rtx_REG (Pmode, RETURN_REGNUM));
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);

  emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
}
/* Rework the prologue/epilogue to avoid saving/restoring
   registers unnecessarily.  */

static void
s390_optimize_prologue (void)
{
  rtx insn, new_insn, next_insn;

  /* Do a final recompute of the frame-related data.  */

  s390_update_frame_layout ();

  /* If all special registers are in fact used, there's nothing we
     can do, so no point in walking the insn list.  */

  if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
      && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
      && (TARGET_CPU_ZARCH
          || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
              && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
    return;

  /* Search for prologue/epilogue insns and replace them.  */

  for (insn = get_insns (); insn; insn = next_insn)
    {
      int first, last, off;
      rtx set, base, offset;

      next_insn = NEXT_INSN (insn);
      if (GET_CODE (insn) != INSN)
        continue;

      if (GET_CODE (PATTERN (insn)) == PARALLEL
          && store_multiple_operation (PATTERN (insn), VOIDmode))
        {
          set = XVECEXP (PATTERN (insn), 0, 0);
          first = REGNO (SET_SRC (set));
          last = first + XVECLEN (PATTERN (insn), 0) - 1;
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (cfun_frame_layout.first_save_gpr != -1
              && (cfun_frame_layout.first_save_gpr < first
                  || cfun_frame_layout.last_save_gpr > last))
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;
          if (first > BASE_REGNUM || last < BASE_REGNUM)
            continue;

          if (cfun_frame_layout.first_save_gpr != -1)
            {
              new_insn = save_gprs (base,
                                    off + (cfun_frame_layout.first_save_gpr
                                           - first) * UNITS_PER_LONG,
                                    cfun_frame_layout.first_save_gpr,
                                    cfun_frame_layout.last_save_gpr);
              new_insn = emit_insn_before (new_insn, insn);
              INSN_ADDRESSES_NEW (new_insn, -1);
            }

          remove_insn (insn);
          continue;
        }

      if (cfun_frame_layout.first_save_gpr == -1
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == REG
          && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
              || (!TARGET_CPU_ZARCH
                  && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
          && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
        {
          set = PATTERN (insn);
          first = REGNO (SET_SRC (set));
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;

          remove_insn (insn);
          continue;
        }

      if (GET_CODE (PATTERN (insn)) == PARALLEL
          && load_multiple_operation (PATTERN (insn), VOIDmode))
        {
          set = XVECEXP (PATTERN (insn), 0, 0);
          first = REGNO (SET_DEST (set));
          last = first + XVECLEN (PATTERN (insn), 0) - 1;
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (cfun_frame_layout.first_restore_gpr != -1
              && (cfun_frame_layout.first_restore_gpr < first
                  || cfun_frame_layout.last_restore_gpr > last))
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;
          if (first > BASE_REGNUM || last < BASE_REGNUM)
            continue;

          if (cfun_frame_layout.first_restore_gpr != -1)
            {
              new_insn = restore_gprs (base,
                                       off + (cfun_frame_layout.first_restore_gpr
                                              - first) * UNITS_PER_LONG,
                                       cfun_frame_layout.first_restore_gpr,
                                       cfun_frame_layout.last_restore_gpr);
              new_insn = emit_insn_before (new_insn, insn);
              INSN_ADDRESSES_NEW (new_insn, -1);
            }

          remove_insn (insn);
          continue;
        }

      if (cfun_frame_layout.first_restore_gpr == -1
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_DEST (PATTERN (insn))) == REG
          && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
              || (!TARGET_CPU_ZARCH
                  && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
          && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
        {
          set = PATTERN (insn);
          first = REGNO (SET_DEST (set));
          offset = const0_rtx;
          base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
          off = INTVAL (offset);

          if (GET_CODE (base) != REG || off < 0)
            continue;
          if (REGNO (base) != STACK_POINTER_REGNUM
              && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
            continue;

          remove_insn (insn);
          continue;
        }
    }
}
/* On z10 and later the dynamic branch prediction must see the
   backward jump within a certain window.  If not it falls back to
   the static prediction.  This function rearranges the loop backward
   branch in a way which makes the static prediction always correct.
   The function returns true if it added an instruction.  */

static bool
s390_fix_long_loop_prediction (rtx insn)
{
  rtx set = single_set (insn);
  rtx code_label, label_ref, new_label;
  rtx uncond_jump;
  rtx cur_insn;
  rtx tmp;
  int distance;

  /* This will exclude branch on count and branch on index patterns
     since these are correctly statically predicted.  */
  if (!set
      || SET_DEST (set) != pc_rtx
      || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
    return false;

  label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
               XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));

  gcc_assert (GET_CODE (label_ref) == LABEL_REF);

  code_label = XEXP (label_ref, 0);

  if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
      || INSN_ADDRESSES (INSN_UID (insn)) == -1
      || (INSN_ADDRESSES (INSN_UID (insn))
          - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
    return false;

  for (distance = 0, cur_insn = PREV_INSN (insn);
       distance < PREDICT_DISTANCE - 6;
       distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
    if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
      return false;

  new_label = gen_label_rtx ();
  uncond_jump = emit_jump_insn_after (
                  gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_LABEL_REF (VOIDmode, code_label)),
                  insn);
  emit_label_after (new_label, uncond_jump);

  tmp = XEXP (SET_SRC (set), 1);
  XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
  XEXP (SET_SRC (set), 2) = tmp;
  INSN_CODE (insn) = -1;

  XEXP (label_ref, 0) = new_label;
  JUMP_LABEL (insn) = new_label;
  JUMP_LABEL (uncond_jump) = code_label;

  return true;
}
/* Returns 1 if INSN reads the value of REG for purposes not related
   to addressing of memory, and 0 otherwise.  */

static int
s390_non_addr_reg_read_p (rtx reg, rtx insn)
{
  return reg_referenced_p (reg, PATTERN (insn))
         && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
}
/* Starting from INSN find_cond_jump looks downwards in the insn
   stream for a single jump insn which is the last user of the
   condition code set in INSN.  */

static rtx
find_cond_jump (rtx insn)
{
  for (; insn; insn = NEXT_INSN (insn))
    {
      rtx ite, cc;

      if (LABEL_P (insn))
        break;

      if (!JUMP_P (insn))
        {
          if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
            break;
          continue;
        }

      /* This will be triggered by a return.  */
      if (GET_CODE (PATTERN (insn)) != SET)
        break;

      gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
      ite = SET_SRC (PATTERN (insn));

      if (GET_CODE (ite) != IF_THEN_ELSE)
        break;

      cc = XEXP (XEXP (ite, 0), 0);
      if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
        break;

      if (find_reg_note (insn, REG_DEAD, cc))
        return insn;
      break;
    }

  return NULL_RTX;
}
/* Swap the condition in COND and the operands in OP0 and OP1 so that
   the semantics does not change.  If NULL_RTX is passed as COND the
   function tries to find the conditional jump starting with INSN.  */

static void
s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
{
  rtx tmp;

  if (cond == NULL_RTX)
    {
      rtx jump = find_cond_jump (NEXT_INSN (insn));
      jump = jump ? single_set (jump) : NULL_RTX;

      if (jump == NULL_RTX)
        return;

      cond = XEXP (XEXP (jump, 1), 0);
    }

  tmp = *op0;
  *op0 = *op1;
  *op1 = tmp;
  PUT_CODE (cond, swap_condition (GET_CODE (cond)));
}
/* On z10, instructions of the compare-and-branch family have the
   property to access the register occurring as second operand with
   its bits complemented.  If such a compare is grouped with a second
   instruction that accesses the same register non-complemented, and
   if that register's value is delivered via a bypass, then the
   pipeline recycles, thereby causing significant performance decline.
   This function locates such situations and exchanges the two
   operands of the compare.  The function returns true whenever it
   added an insn.  */

static bool
s390_z10_optimize_cmp (rtx insn)
{
  rtx prev_insn, next_insn;
  bool insn_added_p = false;
  rtx cond, *op0, *op1;

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      /* Handle compare and branch and branch on count
         instructions.  */
      rtx pattern = single_set (insn);

      if (!pattern
          || SET_DEST (pattern) != pc_rtx
          || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
        return false;

      cond = XEXP (SET_SRC (pattern), 0);
      op0 = &XEXP (cond, 0);
      op1 = &XEXP (cond, 1);
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx src, dest;

      /* Handle normal compare instructions.  */
      src = SET_SRC (PATTERN (insn));
      dest = SET_DEST (PATTERN (insn));

      if (!REG_P (dest)
          || !CC_REGNO_P (REGNO (dest))
          || GET_CODE (src) != COMPARE)
        return false;

      /* s390_swap_cmp will try to find the conditional
         jump when passing NULL_RTX as condition.  */
      cond = NULL_RTX;
      op0 = &XEXP (src, 0);
      op1 = &XEXP (src, 1);
    }
  else
    return false;

  if (!REG_P (*op0) || !REG_P (*op1))
    return false;

  if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
    return false;

  /* Swap the COMPARE arguments and its mask if there is a
     conflicting access in the previous insn.  */
  prev_insn = prev_active_insn (insn);
  if (prev_insn != NULL_RTX && INSN_P (prev_insn)
      && reg_referenced_p (*op1, PATTERN (prev_insn)))
    s390_swap_cmp (cond, op0, op1, insn);

  /* Check if there is a conflict with the next insn.  If there
     was no conflict with the previous insn, then swap the
     COMPARE arguments and its mask.  If we already swapped
     the operands, or if swapping them would cause a conflict
     with the previous insn, issue a NOP after the COMPARE in
     order to separate the two instructions.  */
  next_insn = next_active_insn (insn);
  if (next_insn != NULL_RTX && INSN_P (next_insn)
      && s390_non_addr_reg_read_p (*op1, next_insn))
    {
      if (prev_insn != NULL_RTX && INSN_P (prev_insn)
          && s390_non_addr_reg_read_p (*op0, prev_insn))
        {
          if (REGNO (*op1) == 0)
            emit_insn_after (gen_nop1 (), insn);
          else
            emit_insn_after (gen_nop (), insn);
          insn_added_p = true;
        }
      else
        s390_swap_cmp (cond, op0, op1, insn);
    }

  return insn_added_p;
}
/* Perform machine-dependent processing.  */

static void
s390_reorg (void)
{
  bool pool_overflow = false;

  /* Make sure all splits have been performed; splits after
     machine_dependent_reorg might confuse insn length counts.  */
  split_all_insns_noflow ();

  /* Install the main literal pool and the associated base
     register load insns.

     In addition, there are two problematic situations we need
     to correct:

     - the literal pool might be > 4096 bytes in size, so that
       some of its elements cannot be directly accessed

     - a branch target might be > 64K away from the branch, so that
       it is not possible to use a PC-relative instruction.

     To fix those, we split the single literal pool into multiple
     pool chunks, reloading the pool base register at various
     points throughout the function to ensure it always points to
     the pool chunk the following code expects, and / or replace
     PC-relative branches by absolute branches.

     However, the two problems are interdependent: splitting the
     literal pool can move a branch further away from its target,
     causing the 64K limit to overflow, and on the other hand,
     replacing a PC-relative branch by an absolute branch means
     we need to put the branch target address into the literal
     pool, possibly causing it to overflow.

     So, we loop trying to fix up both problems until we manage
     to satisfy both conditions at the same time.  Note that the
     loop is guaranteed to terminate as every pass of the loop
     strictly decreases the total number of PC-relative branches
     in the function.  (This is not completely true as there
     might be branch-over-pool insns introduced by chunkify_start.
     Those never need to be split however.)  */

  for (;;)
    {
      struct constant_pool *pool = NULL;

      /* Collect the literal pool.  */
      if (!pool_overflow)
        {
          pool = s390_mainpool_start ();
          if (!pool)
            pool_overflow = true;
        }

      /* If literal pool overflowed, start to chunkify it.  */
      if (pool_overflow)
        pool = s390_chunkify_start ();

      /* Split out-of-range branches.  If this has created new
         literal pool entries, cancel current chunk list and
         recompute it.  zSeries machines have large branch
         instructions, so we never need to split a branch.  */
      if (!TARGET_CPU_ZARCH && s390_split_branches ())
        {
          if (pool_overflow)
            s390_chunkify_cancel (pool);
          else
            s390_mainpool_cancel (pool);

          continue;
        }

      /* If we made it up to here, both conditions are satisfied.
         Finish up literal pool related changes.  */
      if (pool_overflow)
        s390_chunkify_finish (pool);
      else
        s390_mainpool_finish (pool);

      /* We're done splitting branches.  */
      cfun->machine->split_branches_pending_p = false;
      break;
    }

  /* Generate out-of-pool execute target insns.  */
  if (TARGET_CPU_ZARCH)
    {
      rtx insn, label, target;

      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          label = s390_execute_label (insn);
          if (!label)
            continue;

          gcc_assert (label != const0_rtx);

          target = emit_label (XEXP (label, 0));
          INSN_ADDRESSES_NEW (target, -1);

          target = emit_insn (s390_execute_target (insn));
          INSN_ADDRESSES_NEW (target, -1);
        }
    }

  /* Try to optimize prologue and epilogue further.  */
  s390_optimize_prologue ();

  /* Walk over the insns and do some >=z10 specific changes.  */
  if (s390_tune == PROCESSOR_2097_Z10
      || s390_tune == PROCESSOR_2817_Z196)
    {
      rtx insn;
      bool insn_added_p = false;

      /* The insn lengths and addresses have to be up to date for the
         following manipulations.  */
      shorten_branches (get_insns ());

      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
            continue;

          if (JUMP_P (insn))
            insn_added_p |= s390_fix_long_loop_prediction (insn);

          if ((GET_CODE (PATTERN (insn)) == PARALLEL
               || GET_CODE (PATTERN (insn)) == SET)
              && s390_tune == PROCESSOR_2097_Z10)
            insn_added_p |= s390_z10_optimize_cmp (insn);
        }

      /* Adjust branches if we added new instructions.  */
      if (insn_added_p)
        shorten_branches (get_insns ());
    }
}
/* Return true if INSN is a fp load insn writing register REGNO.  */

static bool
s390_fpload_toreg (rtx insn, unsigned int regno)
{
  rtx set;
  enum attr_type flag = s390_safe_attr_type (insn);

  if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
    return false;

  set = single_set (insn);

  if (set == NULL_RTX)
    return false;

  if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
    return false;

  if (REGNO (SET_DEST (set)) != regno)
    return false;

  return true;
}
/* This value describes the distance to be avoided between an
   arithmetic fp instruction and an fp load writing the same register.
   Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
   fine but the exact value has to be avoided.  Otherwise the FP
   pipeline will throw an exception causing a major penalty.  */
#define Z10_EARLYLOAD_DISTANCE 7

/* Rearrange the ready list in order to avoid the situation described
   for Z10_EARLYLOAD_DISTANCE.  A problematic load instruction is
   moved to the very end of the ready list.  */

static void
s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
{
  unsigned int regno;
  int nready = *nready_p;
  rtx tmp;
  int i;
  rtx insn;
  rtx set;
  enum attr_type flag;
  int distance;

  /* Skip DISTANCE - 1 active insns.  */
  for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
       distance > 0 && insn != NULL_RTX;
       distance--, insn = prev_active_insn (insn))
    if (CALL_P (insn) || JUMP_P (insn))
      return;

  if (insn == NULL_RTX)
    return;

  set = single_set (insn);

  if (set == NULL_RTX || !REG_P (SET_DEST (set))
      || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
    return;

  flag = s390_safe_attr_type (insn);

  if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
    return;

  regno = REGNO (SET_DEST (set));
  i = nready - 1;

  while (!s390_fpload_toreg (ready[i], regno) && i > 0)
    i--;

  if (!i)
    return;

  tmp = ready[i];
  memmove (&ready[1], &ready[0], sizeof (rtx) * i);
  ready[0] = tmp;
}
/* This function is called via hook TARGET_SCHED_REORDER before
   issuing one insn from list READY which contains *NREADYP entries.
   For target z10 it reorders load instructions to avoid early load
   conflicts in the floating point pipeline.  */

static int
s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
                    rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
{
  if (s390_tune == PROCESSOR_2097_Z10)
    if (reload_completed && *nreadyp > 1)
      s390_z10_prevent_earlyload_conflicts (ready, nreadyp);

  return s390_issue_rate ();
}
/* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
   the scheduler has issued INSN.  It stores the last issued insn into
   last_scheduled_insn in order to make it available for
   s390_sched_reorder.  */

static int
s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
                           int verbose ATTRIBUTE_UNUSED,
                           rtx insn, int more)
{
  last_scheduled_insn = insn;

  if (GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    return more - 1;
  else
    return more;
}
static void
s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
                 int verbose ATTRIBUTE_UNUSED,
                 int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
}
/* This function checks the whole of insn X for memory references.  The
   function always returns zero because the framework it is called
   from would stop recursively analyzing the insn upon a return value
   other than zero.  The real result of this function is updating
   counter variable MEM_COUNT.  */

static int
check_dpu (rtx *x, unsigned *mem_count)
{
  if (*x != NULL_RTX && MEM_P (*x))
    (*mem_count)++;
  return 0;
}
/* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
   a new number of times struct loop *loop should be unrolled when tuned
   for cpus with a built-in stride prefetcher.
   The loop is analyzed for memory accesses by calling check_dpu for
   each rtx of the loop.  Depending on the loop_depth and the amount of
   memory accesses a new number <= nunroll is returned to improve the
   behaviour of the hardware prefetch unit.  */

static unsigned
s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
{
  basic_block *bbs;
  rtx insn;
  unsigned i;
  unsigned mem_count = 0;

  if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
    return nunroll;

  /* Count the number of memory references within the loop body.  */
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
        if (INSN_P (insn) && INSN_CODE (insn) != -1)
          for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
    }
  free (bbs);

  /* Prevent division by zero; no adjustment of nunroll is needed in
     this case.  */
  if (mem_count == 0)
    return nunroll;

  switch (loop_depth (loop))
    {
    case 1:
      return MIN (nunroll, 28 / mem_count);
    case 2:
      return MIN (nunroll, 22 / mem_count);
    default:
      return MIN (nunroll, 16 / mem_count);
    }
}
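/* Worked example (illustration only): a loop at depth 1 containing four
   memory references is unrolled at most MIN (nunroll, 28 / 4)
   = MIN (nunroll, 7) times.  */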
/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_FUSED_MADD)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION s390_handle_option

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#undef TARGET_OPTION_OPTIMIZATION
#define TARGET_OPTION_OPTIMIZATION s390_option_optimization

#undef TARGET_OPTION_INIT_STRUCT
#define TARGET_OPTION_INIT_STRUCT s390_option_init_struct

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"