S/390: Fix missing .size directives.
gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2016 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "target-globals.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "cfgloop.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "expmed.h"
38 #include "optabs.h"
39 #include "regs.h"
40 #include "emit-rtl.h"
41 #include "recog.h"
42 #include "cgraph.h"
43 #include "diagnostic-core.h"
44 #include "diagnostic.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "print-tree.h"
48 #include "stor-layout.h"
49 #include "varasm.h"
50 #include "calls.h"
51 #include "conditions.h"
52 #include "output.h"
53 #include "insn-attr.h"
54 #include "flags.h"
55 #include "except.h"
56 #include "dojump.h"
57 #include "explow.h"
58 #include "stmt.h"
59 #include "expr.h"
60 #include "reload.h"
61 #include "cfgrtl.h"
62 #include "cfganal.h"
63 #include "lcm.h"
64 #include "cfgbuild.h"
65 #include "cfgcleanup.h"
66 #include "debug.h"
67 #include "langhooks.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
70 #include "tree-eh.h"
71 #include "gimplify.h"
72 #include "params.h"
73 #include "opts.h"
74 #include "tree-pass.h"
75 #include "context.h"
76 #include "builtins.h"
77 #include "rtl-iter.h"
78 #include "intl.h"
79 #include "tm-constrs.h"
81 /* This file should be included last. */
82 #include "target-def.h"
84 /* Remember the last target of s390_set_current_function. */
85 static GTY(()) tree s390_previous_fndecl;
87 /* Define the specific costs for a given cpu. */
89 struct processor_costs
91 /* multiplication */
92 const int m; /* cost of an M instruction. */
93 const int mghi; /* cost of an MGHI instruction. */
94 const int mh; /* cost of an MH instruction. */
95 const int mhi; /* cost of an MHI instruction. */
96 const int ml; /* cost of an ML instruction. */
97 const int mr; /* cost of an MR instruction. */
98 const int ms; /* cost of an MS instruction. */
99 const int msg; /* cost of an MSG instruction. */
100 const int msgf; /* cost of an MSGF instruction. */
101 const int msgfr; /* cost of an MSGFR instruction. */
102 const int msgr; /* cost of an MSGR instruction. */
103 const int msr; /* cost of an MSR instruction. */
104 const int mult_df; /* cost of multiplication in DFmode. */
105 const int mxbr;
106 /* square root */
107 const int sqxbr; /* cost of square root in TFmode. */
108 const int sqdbr; /* cost of square root in DFmode. */
109 const int sqebr; /* cost of square root in SFmode. */
110 /* multiply and add */
111 const int madbr; /* cost of multiply and add in DFmode. */
112 const int maebr; /* cost of multiply and add in SFmode. */
113 /* division */
114 const int dxbr;
115 const int ddbr;
116 const int debr;
117 const int dlgr;
118 const int dlr;
119 const int dr;
120 const int dsgfr;
121 const int dsgr;
124 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
126 static const
127 struct processor_costs z900_cost =
129 COSTS_N_INSNS (5), /* M */
130 COSTS_N_INSNS (10), /* MGHI */
131 COSTS_N_INSNS (5), /* MH */
132 COSTS_N_INSNS (4), /* MHI */
133 COSTS_N_INSNS (5), /* ML */
134 COSTS_N_INSNS (5), /* MR */
135 COSTS_N_INSNS (4), /* MS */
136 COSTS_N_INSNS (15), /* MSG */
137 COSTS_N_INSNS (7), /* MSGF */
138 COSTS_N_INSNS (7), /* MSGFR */
139 COSTS_N_INSNS (10), /* MSGR */
140 COSTS_N_INSNS (4), /* MSR */
141 COSTS_N_INSNS (7), /* multiplication in DFmode */
142 COSTS_N_INSNS (13), /* MXBR */
143 COSTS_N_INSNS (136), /* SQXBR */
144 COSTS_N_INSNS (44), /* SQDBR */
145 COSTS_N_INSNS (35), /* SQEBR */
146 COSTS_N_INSNS (18), /* MADBR */
147 COSTS_N_INSNS (13), /* MAEBR */
148 COSTS_N_INSNS (134), /* DXBR */
149 COSTS_N_INSNS (30), /* DDBR */
150 COSTS_N_INSNS (27), /* DEBR */
151 COSTS_N_INSNS (220), /* DLGR */
152 COSTS_N_INSNS (34), /* DLR */
153 COSTS_N_INSNS (34), /* DR */
154 COSTS_N_INSNS (32), /* DSGFR */
155 COSTS_N_INSNS (32), /* DSGR */
158 static const
159 struct processor_costs z990_cost =
161 COSTS_N_INSNS (4), /* M */
162 COSTS_N_INSNS (2), /* MGHI */
163 COSTS_N_INSNS (2), /* MH */
164 COSTS_N_INSNS (2), /* MHI */
165 COSTS_N_INSNS (4), /* ML */
166 COSTS_N_INSNS (4), /* MR */
167 COSTS_N_INSNS (5), /* MS */
168 COSTS_N_INSNS (6), /* MSG */
169 COSTS_N_INSNS (4), /* MSGF */
170 COSTS_N_INSNS (4), /* MSGFR */
171 COSTS_N_INSNS (4), /* MSGR */
172 COSTS_N_INSNS (4), /* MSR */
173 COSTS_N_INSNS (1), /* multiplication in DFmode */
174 COSTS_N_INSNS (28), /* MXBR */
175 COSTS_N_INSNS (130), /* SQXBR */
176 COSTS_N_INSNS (66), /* SQDBR */
177 COSTS_N_INSNS (38), /* SQEBR */
178 COSTS_N_INSNS (1), /* MADBR */
179 COSTS_N_INSNS (1), /* MAEBR */
180 COSTS_N_INSNS (60), /* DXBR */
181 COSTS_N_INSNS (40), /* DDBR */
182 COSTS_N_INSNS (26), /* DEBR */
183 COSTS_N_INSNS (176), /* DLGR */
184 COSTS_N_INSNS (31), /* DLR */
185 COSTS_N_INSNS (31), /* DR */
186 COSTS_N_INSNS (31), /* DSGFR */
187 COSTS_N_INSNS (31), /* DSGR */
190 static const
191 struct processor_costs z9_109_cost =
193 COSTS_N_INSNS (4), /* M */
194 COSTS_N_INSNS (2), /* MGHI */
195 COSTS_N_INSNS (2), /* MH */
196 COSTS_N_INSNS (2), /* MHI */
197 COSTS_N_INSNS (4), /* ML */
198 COSTS_N_INSNS (4), /* MR */
199 COSTS_N_INSNS (5), /* MS */
200 COSTS_N_INSNS (6), /* MSG */
201 COSTS_N_INSNS (4), /* MSGF */
202 COSTS_N_INSNS (4), /* MSGFR */
203 COSTS_N_INSNS (4), /* MSGR */
204 COSTS_N_INSNS (4), /* MSR */
205 COSTS_N_INSNS (1), /* multiplication in DFmode */
206 COSTS_N_INSNS (28), /* MXBR */
207 COSTS_N_INSNS (130), /* SQXBR */
208 COSTS_N_INSNS (66), /* SQDBR */
209 COSTS_N_INSNS (38), /* SQEBR */
210 COSTS_N_INSNS (1), /* MADBR */
211 COSTS_N_INSNS (1), /* MAEBR */
212 COSTS_N_INSNS (60), /* DXBR */
213 COSTS_N_INSNS (40), /* DDBR */
214 COSTS_N_INSNS (26), /* DEBR */
215 COSTS_N_INSNS (30), /* DLGR */
216 COSTS_N_INSNS (23), /* DLR */
217 COSTS_N_INSNS (23), /* DR */
218 COSTS_N_INSNS (24), /* DSGFR */
219 COSTS_N_INSNS (24), /* DSGR */
222 static const
223 struct processor_costs z10_cost =
225 COSTS_N_INSNS (10), /* M */
226 COSTS_N_INSNS (10), /* MGHI */
227 COSTS_N_INSNS (10), /* MH */
228 COSTS_N_INSNS (10), /* MHI */
229 COSTS_N_INSNS (10), /* ML */
230 COSTS_N_INSNS (10), /* MR */
231 COSTS_N_INSNS (10), /* MS */
232 COSTS_N_INSNS (10), /* MSG */
233 COSTS_N_INSNS (10), /* MSGF */
234 COSTS_N_INSNS (10), /* MSGFR */
235 COSTS_N_INSNS (10), /* MSGR */
236 COSTS_N_INSNS (10), /* MSR */
237 COSTS_N_INSNS (1) , /* multiplication in DFmode */
238 COSTS_N_INSNS (50), /* MXBR */
239 COSTS_N_INSNS (120), /* SQXBR */
240 COSTS_N_INSNS (52), /* SQDBR */
241 COSTS_N_INSNS (38), /* SQEBR */
242 COSTS_N_INSNS (1), /* MADBR */
243 COSTS_N_INSNS (1), /* MAEBR */
244 COSTS_N_INSNS (111), /* DXBR */
245 COSTS_N_INSNS (39), /* DDBR */
246 COSTS_N_INSNS (32), /* DEBR */
247 COSTS_N_INSNS (160), /* DLGR */
248 COSTS_N_INSNS (71), /* DLR */
249 COSTS_N_INSNS (71), /* DR */
250 COSTS_N_INSNS (71), /* DSGFR */
251 COSTS_N_INSNS (71), /* DSGR */
254 static const
255 struct processor_costs z196_cost =
257 COSTS_N_INSNS (7), /* M */
258 COSTS_N_INSNS (5), /* MGHI */
259 COSTS_N_INSNS (5), /* MH */
260 COSTS_N_INSNS (5), /* MHI */
261 COSTS_N_INSNS (7), /* ML */
262 COSTS_N_INSNS (7), /* MR */
263 COSTS_N_INSNS (6), /* MS */
264 COSTS_N_INSNS (8), /* MSG */
265 COSTS_N_INSNS (6), /* MSGF */
266 COSTS_N_INSNS (6), /* MSGFR */
267 COSTS_N_INSNS (8), /* MSGR */
268 COSTS_N_INSNS (6), /* MSR */
269 COSTS_N_INSNS (1) , /* multiplication in DFmode */
270 COSTS_N_INSNS (40), /* MXBR B+40 */
271 COSTS_N_INSNS (100), /* SQXBR B+100 */
272 COSTS_N_INSNS (42), /* SQDBR B+42 */
273 COSTS_N_INSNS (28), /* SQEBR B+28 */
274 COSTS_N_INSNS (1), /* MADBR B */
275 COSTS_N_INSNS (1), /* MAEBR B */
276 COSTS_N_INSNS (101), /* DXBR B+101 */
277 COSTS_N_INSNS (29), /* DDBR */
278 COSTS_N_INSNS (22), /* DEBR */
279 COSTS_N_INSNS (160), /* DLGR cracked */
280 COSTS_N_INSNS (160), /* DLR cracked */
281 COSTS_N_INSNS (160), /* DR expanded */
282 COSTS_N_INSNS (160), /* DSGFR cracked */
283 COSTS_N_INSNS (160), /* DSGR cracked */
286 static const
287 struct processor_costs zEC12_cost =
289 COSTS_N_INSNS (7), /* M */
290 COSTS_N_INSNS (5), /* MGHI */
291 COSTS_N_INSNS (5), /* MH */
292 COSTS_N_INSNS (5), /* MHI */
293 COSTS_N_INSNS (7), /* ML */
294 COSTS_N_INSNS (7), /* MR */
295 COSTS_N_INSNS (6), /* MS */
296 COSTS_N_INSNS (8), /* MSG */
297 COSTS_N_INSNS (6), /* MSGF */
298 COSTS_N_INSNS (6), /* MSGFR */
299 COSTS_N_INSNS (8), /* MSGR */
300 COSTS_N_INSNS (6), /* MSR */
301 COSTS_N_INSNS (1) , /* multiplication in DFmode */
302 COSTS_N_INSNS (40), /* MXBR B+40 */
303 COSTS_N_INSNS (100), /* SQXBR B+100 */
304 COSTS_N_INSNS (42), /* SQDBR B+42 */
305 COSTS_N_INSNS (28), /* SQEBR B+28 */
306 COSTS_N_INSNS (1), /* MADBR B */
307 COSTS_N_INSNS (1), /* MAEBR B */
308 COSTS_N_INSNS (131), /* DXBR B+131 */
309 COSTS_N_INSNS (29), /* DDBR */
310 COSTS_N_INSNS (22), /* DEBR */
311 COSTS_N_INSNS (160), /* DLGR cracked */
312 COSTS_N_INSNS (160), /* DLR cracked */
313 COSTS_N_INSNS (160), /* DR expanded */
314 COSTS_N_INSNS (160), /* DSGFR cracked */
315 COSTS_N_INSNS (160), /* DSGR cracked */
318 static struct
320 const char *const name;
321 const enum processor_type processor;
322 const struct processor_costs *cost;
324 const processor_table[] =
326 { "g5", PROCESSOR_9672_G5, &z900_cost },
327 { "g6", PROCESSOR_9672_G6, &z900_cost },
328 { "z900", PROCESSOR_2064_Z900, &z900_cost },
329 { "z990", PROCESSOR_2084_Z990, &z990_cost },
330 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
331 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
332 { "z10", PROCESSOR_2097_Z10, &z10_cost },
333 { "z196", PROCESSOR_2817_Z196, &z196_cost },
334 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
335 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
336 { "native", PROCESSOR_NATIVE, NULL }
339 extern int reload_completed;
341 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
342 static rtx_insn *last_scheduled_insn;
344 /* Structure used to hold the components of a S/390 memory
345 address. A legitimate address on S/390 is of the general
346 form
347 base + index + displacement
348 where any of the components is optional.
350 base and index are registers of the class ADDR_REGS,
351 displacement is an unsigned 12-bit immediate constant. */
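/* For illustration: the memory operand of an instruction like
   "lg %r1,8(%r3,%r2)" decomposes into displacement 8, index %r3 and
   base %r2; any of the three components may be absent.  */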
353 struct s390_address
355 rtx base;
356 rtx indx;
357 rtx disp;
358 bool pointer;
359 bool literal_pool;
362 /* The following structure is embedded in the machine
363 specific part of struct function. */
365 struct GTY (()) s390_frame_layout
367 /* Offset within stack frame. */
368 HOST_WIDE_INT gprs_offset;
369 HOST_WIDE_INT f0_offset;
370 HOST_WIDE_INT f4_offset;
371 HOST_WIDE_INT f8_offset;
372 HOST_WIDE_INT backchain_offset;
374 /* Numbers of the first and last GPRs for which slots in the
375 register save area are reserved. */
376 int first_save_gpr_slot;
377 int last_save_gpr_slot;
379 /* Location (FP register number) where GPRs (r0-r15) should
380 be saved to.
381 0 - does not need to be saved at all
382 -1 - stack slot */
383 signed char gpr_save_slots[16];
385 /* Number of first and last gpr to be saved, restored. */
386 int first_save_gpr;
387 int first_restore_gpr;
388 int last_save_gpr;
389 int last_restore_gpr;
391 /* Bits standing for floating point registers. Set, if the
392 respective register has to be saved. Starting with reg 16 (f0)
393 at the rightmost bit.
394 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
395 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
396 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
397 unsigned int fpr_bitmap;
399 /* Number of floating point registers f8-f15 which must be saved. */
400 int high_fprs;
402 /* Set if return address needs to be saved.
403 This flag is set by s390_return_addr_rtx if it could not use
404 the initial value of r14 and therefore depends on r14 being
405 saved to the stack. */
406 bool save_return_addr_p;
408 /* Size of stack frame. */
409 HOST_WIDE_INT frame_size;
412 /* Define the structure for the machine field in struct function. */
414 struct GTY(()) machine_function
416 struct s390_frame_layout frame_layout;
418 /* Literal pool base register. */
419 rtx base_reg;
421 /* True if we may need to perform branch splitting. */
422 bool split_branches_pending_p;
424 bool has_landing_pad_p;
426 /* True if the current function may contain a tbegin clobbering
427 FPRs. */
428 bool tbegin_p;
431 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
433 #define cfun_frame_layout (cfun->machine->frame_layout)
434 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
435 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
436 ? cfun_frame_layout.fpr_bitmap & 0x0f \
437 : cfun_frame_layout.fpr_bitmap & 0x03))
438 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
439 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
440 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
441 (1 << (REGNO - FPR0_REGNUM)))
442 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
443 (1 << (REGNO - FPR0_REGNUM))))
444 #define cfun_gpr_save_slot(REGNO) \
445 cfun->machine->frame_layout.gpr_save_slots[REGNO]
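/* Worked example, following the fpr_bitmap layout documented above: on
   a 64-bit target cfun_save_arg_fprs_p tests bits 0-3, which stand for
   the argument FPRs f0, f2, f4 and f6, while cfun_set_fpr_save applied
   to hard register 24 (f8) sets bit 8, one of the high_fprs bits.  */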
447 /* Number of GPRs and FPRs used for argument passing. */
448 #define GP_ARG_NUM_REG 5
449 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
450 #define VEC_ARG_NUM_REG 8
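/* In terms of the s390 ELF ABI these counts correspond to the argument
   registers r2-r6, to f0/f2/f4/f6 (f0/f2 on 31 bit) and to v24-v31
   respectively.  */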
452 /* A couple of shortcuts. */
453 #define CONST_OK_FOR_J(x) \
454 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
455 #define CONST_OK_FOR_K(x) \
456 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
457 #define CONST_OK_FOR_Os(x) \
458 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
459 #define CONST_OK_FOR_Op(x) \
460 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
461 #define CONST_OK_FOR_On(x) \
462 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
464 #define REGNO_PAIR_OK(REGNO, MODE) \
465 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
467 /* That's the read ahead of the dynamic branch prediction unit in
468 bytes on a z10 (or higher) CPU. */
469 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
472 /* Indicate which ABI has been used for passing vector args.
473 0 - no vector type arguments have been passed where the ABI is relevant
474 1 - the old ABI has been used
475 2 - a vector type argument has been passed either in a vector register
476 or on the stack by value */
477 static int s390_vector_abi = 0;
479 /* Set the vector ABI marker if TYPE is subject to the vector ABI
480 switch. The vector ABI affects only vector data types. There are
481 two aspects of the vector ABI relevant here:
483 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
484 ABI and natural alignment with the old.
486 2. vector <= 16 bytes are passed in VRs or by value on the stack
487 with the new ABI but by reference on the stack with the old.
489 If ARG_P is true TYPE is used for a function argument or return
490 value. The ABI marker then is set for all vector data types. If
491 ARG_P is false only type 1 vectors are being checked. */
493 static void
494 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
496 static hash_set<const_tree> visited_types_hash;
498 if (s390_vector_abi)
499 return;
501 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
502 return;
504 if (visited_types_hash.contains (type))
505 return;
507 visited_types_hash.add (type);
509 if (VECTOR_TYPE_P (type))
511 int type_size = int_size_in_bytes (type);
513 /* Outside arguments only the alignment changes and this
514 only happens for vector types >= 16 bytes. */
515 if (!arg_p && type_size < 16)
516 return;
518 /* In arguments vector types > 16 bytes are passed as before (GCC
519 never enforced the bigger alignment for arguments which was
520 required by the old vector ABI). However, it might still be
521 ABI relevant due to the changed alignment if it is a struct
522 member. */
523 if (arg_p && type_size > 16 && !in_struct_p)
524 return;
526 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
528 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
530 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
531 natural alignment there will never be ABI dependent padding
532 in an array type. That's why we do not set in_struct_p to
533 true here. */
534 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
536 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
538 tree arg_chain;
540 /* Check the return type. */
541 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
543 for (arg_chain = TYPE_ARG_TYPES (type);
544 arg_chain;
545 arg_chain = TREE_CHAIN (arg_chain))
546 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
548 else if (RECORD_OR_UNION_TYPE_P (type))
550 tree field;
552 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
554 if (TREE_CODE (field) != FIELD_DECL)
555 continue;
557 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
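/* Illustrative examples for the two aspects above: a 16-byte vector
   such as
     typedef int v4si __attribute__ ((vector_size (16)));
   differs between the ABIs both in alignment and in how it is passed
   (in a VR or by value vs. by reference on the stack), so it always
   sets the marker.  A 32-byte vector like
     typedef double v4df __attribute__ ((vector_size (32)));
   is passed as before when used directly as an argument, but its
   changed alignment still matters for data and for struct members.  */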
563 /* System z builtins. */
565 #include "s390-builtins.h"
567 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
569 #undef B_DEF
570 #undef OB_DEF
571 #undef OB_DEF_VAR
572 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
573 #define OB_DEF(...)
574 #define OB_DEF_VAR(...)
575 #include "s390-builtins.def"
579 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
581 #undef B_DEF
582 #undef OB_DEF
583 #undef OB_DEF_VAR
584 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
585 #define OB_DEF(...)
586 #define OB_DEF_VAR(...)
587 #include "s390-builtins.def"
591 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
593 #undef B_DEF
594 #undef OB_DEF
595 #undef OB_DEF_VAR
596 #define B_DEF(...)
597 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
598 #define OB_DEF_VAR(...)
599 #include "s390-builtins.def"
603 const unsigned int
604 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
606 #undef B_DEF
607 #undef OB_DEF
608 #undef OB_DEF_VAR
609 #define B_DEF(...)
610 #define OB_DEF(...)
611 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
612 #include "s390-builtins.def"
616 tree s390_builtin_types[BT_MAX];
617 tree s390_builtin_fn_types[BT_FN_MAX];
618 tree s390_builtin_decls[S390_BUILTIN_MAX +
619 S390_OVERLOADED_BUILTIN_MAX +
620 S390_OVERLOADED_BUILTIN_VAR_MAX];
622 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
623 #undef B_DEF
624 #undef OB_DEF
625 #undef OB_DEF_VAR
626 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
627 #define OB_DEF(...)
628 #define OB_DEF_VAR(...)
630 #include "s390-builtins.def"
631 CODE_FOR_nothing
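/* For illustration, with made-up names: a line like
     B_DEF (s390_foo, foo_pattern, 0, B_VX, O_U4, BT_FN_INT_INT)
   in s390-builtins.def would contribute B_VX to bflags_builtin, O_U4
   to opflags_builtin and CODE_FOR_foo_pattern to code_for_builtin at
   index S390_BUILTIN_s390_foo, and s390_init_builtins below would
   register the decl as __builtin_s390_foo.  */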
634 static void
635 s390_init_builtins (void)
637 /* These definitions are being used in s390-builtins.def. */
638 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
639 NULL, NULL);
640 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
641 tree c_uint64_type_node;
643 /* The uint64_type_node from tree.c is not compatible with the C99
644 uint64_t data type. What we want is c_uint64_type_node from
645 c-common.c. But since backend code is not supposed to interface
646 with the frontend we recreate it here. */
647 if (TARGET_64BIT)
648 c_uint64_type_node = long_unsigned_type_node;
649 else
650 c_uint64_type_node = long_long_unsigned_type_node;
652 #undef DEF_TYPE
653 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
654 if (s390_builtin_types[INDEX] == NULL) \
655 s390_builtin_types[INDEX] = (!CONST_P) ? \
656 (NODE) : build_type_variant ((NODE), 1, 0);
658 #undef DEF_POINTER_TYPE
659 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
660 if (s390_builtin_types[INDEX] == NULL) \
661 s390_builtin_types[INDEX] = \
662 build_pointer_type (s390_builtin_types[INDEX_BASE]);
664 #undef DEF_DISTINCT_TYPE
665 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
666 if (s390_builtin_types[INDEX] == NULL) \
667 s390_builtin_types[INDEX] = \
668 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
670 #undef DEF_VECTOR_TYPE
671 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
672 if (s390_builtin_types[INDEX] == NULL) \
673 s390_builtin_types[INDEX] = \
674 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
676 #undef DEF_OPAQUE_VECTOR_TYPE
677 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
678 if (s390_builtin_types[INDEX] == NULL) \
679 s390_builtin_types[INDEX] = \
680 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
682 #undef DEF_FN_TYPE
683 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
684 if (s390_builtin_fn_types[INDEX] == NULL) \
685 s390_builtin_fn_types[INDEX] = \
686 build_function_type_list (args, NULL_TREE);
687 #undef DEF_OV_TYPE
688 #define DEF_OV_TYPE(...)
689 #include "s390-builtin-types.def"
691 #undef B_DEF
692 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
693 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
694 s390_builtin_decls[S390_BUILTIN_##NAME] = \
695 add_builtin_function ("__builtin_" #NAME, \
696 s390_builtin_fn_types[FNTYPE], \
697 S390_BUILTIN_##NAME, \
698 BUILT_IN_MD, \
699 NULL, \
700 ATTRS);
701 #undef OB_DEF
702 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
703 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
704 == NULL) \
705 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
706 add_builtin_function ("__builtin_" #NAME, \
707 s390_builtin_fn_types[FNTYPE], \
708 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
709 BUILT_IN_MD, \
710 NULL, \
712 #undef OB_DEF_VAR
713 #define OB_DEF_VAR(...)
714 #include "s390-builtins.def"
718 /* Return true if ARG is appropriate as argument number ARGNUM of
719 builtin DECL. The operand flags from s390-builtins.def have to
720 be passed as OP_FLAGS. */
721 bool
722 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
724 if (O_UIMM_P (op_flags))
726 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
727 int bitwidth = bitwidths[op_flags - O_U1];
729 if (!tree_fits_uhwi_p (arg)
730 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
732 error("constant argument %d for builtin %qF is out of range (0.."
733 HOST_WIDE_INT_PRINT_UNSIGNED ")",
734 argnum, decl,
735 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
736 return false;
740 if (O_SIMM_P (op_flags))
742 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
743 int bitwidth = bitwidths[op_flags - O_S2];
745 if (!tree_fits_shwi_p (arg)
746 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
747 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
749 error("constant argument %d for builtin %qF is out of range ("
750 HOST_WIDE_INT_PRINT_DEC ".."
751 HOST_WIDE_INT_PRINT_DEC ")",
752 argnum, decl,
753 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
754 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
755 return false;
758 return true;
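/* Example of the range checks above: an unsigned operand flag selecting
   bitwidth 4 (O_U4) accepts constants 0..15, while a signed flag
   selecting bitwidth 8 accepts -128..127; anything outside triggers
   the out-of-range error.  */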
761 /* Expand an expression EXP that calls a built-in function,
762 with result going to TARGET if that's convenient
763 (and in mode MODE if that's convenient).
764 SUBTARGET may be used as the target for computing one of EXP's operands.
765 IGNORE is nonzero if the value is to be ignored. */
767 static rtx
768 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
769 machine_mode mode ATTRIBUTE_UNUSED,
770 int ignore ATTRIBUTE_UNUSED)
772 #define MAX_ARGS 5
774 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
775 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
776 enum insn_code icode;
777 rtx op[MAX_ARGS], pat;
778 int arity;
779 bool nonvoid;
780 tree arg;
781 call_expr_arg_iterator iter;
782 unsigned int all_op_flags = opflags_for_builtin (fcode);
783 machine_mode last_vec_mode = VOIDmode;
785 if (TARGET_DEBUG_ARG)
787 fprintf (stderr,
788 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
789 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
790 bflags_for_builtin (fcode));
793 if (S390_USE_TARGET_ATTRIBUTE)
795 unsigned int bflags;
797 bflags = bflags_for_builtin (fcode);
798 if ((bflags & B_HTM) && !TARGET_HTM)
800 error ("Builtin %qF is not supported without -mhtm "
801 "(default with -march=zEC12 and higher).", fndecl);
802 return const0_rtx;
804 if ((bflags & B_VX) && !TARGET_VX)
806 error ("Builtin %qF is not supported without -mvx "
807 "(default with -march=z13 and higher).", fndecl);
808 return const0_rtx;
811 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
812 && fcode < S390_ALL_BUILTIN_MAX)
814 gcc_unreachable ();
816 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
818 icode = code_for_builtin[fcode];
819 /* Set a flag in the machine specific cfun part in order to support
820 saving/restoring of FPRs. */
821 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
822 cfun->machine->tbegin_p = true;
824 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
826 error ("Unresolved overloaded builtin");
827 return const0_rtx;
829 else
830 internal_error ("bad builtin fcode");
832 if (icode == 0)
833 internal_error ("bad builtin icode");
835 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
837 if (nonvoid)
839 machine_mode tmode = insn_data[icode].operand[0].mode;
840 if (!target
841 || GET_MODE (target) != tmode
842 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
843 target = gen_reg_rtx (tmode);
845 /* There are builtins (e.g. vec_promote) with no vector
846 arguments but an element selector. So we have to also look
847 at the vector return type when emitting the modulo
848 operation. */
849 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
850 last_vec_mode = insn_data[icode].operand[0].mode;
853 arity = 0;
854 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
856 const struct insn_operand_data *insn_op;
857 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
859 all_op_flags = all_op_flags >> O_SHIFT;
861 if (arg == error_mark_node)
862 return NULL_RTX;
863 if (arity >= MAX_ARGS)
864 return NULL_RTX;
866 if (O_IMM_P (op_flags)
867 && TREE_CODE (arg) != INTEGER_CST)
869 error ("constant value required for builtin %qF argument %d",
870 fndecl, arity + 1);
871 return const0_rtx;
874 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
875 return const0_rtx;
877 insn_op = &insn_data[icode].operand[arity + nonvoid];
878 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
880 /* expand_expr truncates constants to the target mode only if it
881 is "convenient". However, our checks below rely on this
882 being done. */
883 if (CONST_INT_P (op[arity])
884 && SCALAR_INT_MODE_P (insn_op->mode)
885 && GET_MODE (op[arity]) != insn_op->mode)
886 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
887 insn_op->mode));
889 /* Wrap the expanded RTX for pointer types into a MEM expr with
890 the proper mode. This allows us to use e.g. (match_operand
891 "memory_operand"..) in the insn patterns instead of (mem
892 (match_operand "address_operand)). This is helpful for
893 patterns not just accepting MEMs. */
894 if (POINTER_TYPE_P (TREE_TYPE (arg))
895 && insn_op->predicate != address_operand)
896 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
898 /* Expand the modulo operation required on element selectors. */
899 if (op_flags == O_ELEM)
901 gcc_assert (last_vec_mode != VOIDmode);
902 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
903 op[arity],
904 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
905 NULL_RTX, 1, OPTAB_DIRECT);
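/* E.g. for a V4SImode vector the selector is ANDed with
   GET_MODE_NUNITS - 1 = 3, so a constant selector of 5 is reduced
   to element 1.  */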
908 /* Record the vector mode used for an element selector. This assumes:
909 1. There is no builtin with two different vector modes and an element selector
910 2. The element selector comes after the vector type it is referring to.
911 This is currently true for all the builtins but FIXME we
912 should check for that. */
913 if (VECTOR_MODE_P (insn_op->mode))
914 last_vec_mode = insn_op->mode;
916 if (insn_op->predicate (op[arity], insn_op->mode))
918 arity++;
919 continue;
922 if (MEM_P (op[arity])
923 && insn_op->predicate == memory_operand
924 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
925 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
927 op[arity] = replace_equiv_address (op[arity],
928 copy_to_mode_reg (Pmode,
929 XEXP (op[arity], 0)));
931 else if (GET_MODE (op[arity]) == insn_op->mode
932 || GET_MODE (op[arity]) == VOIDmode
933 || (insn_op->predicate == address_operand
934 && GET_MODE (op[arity]) == Pmode))
936 /* An address_operand usually has VOIDmode in the expander
937 so we cannot use this. */
938 machine_mode target_mode =
939 (insn_op->predicate == address_operand
940 ? Pmode : insn_op->mode);
941 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
944 if (!insn_op->predicate (op[arity], insn_op->mode))
946 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
947 return const0_rtx;
949 arity++;
952 switch (arity)
954 case 0:
955 pat = GEN_FCN (icode) (target);
956 break;
957 case 1:
958 if (nonvoid)
959 pat = GEN_FCN (icode) (target, op[0]);
960 else
961 pat = GEN_FCN (icode) (op[0]);
962 break;
963 case 2:
964 if (nonvoid)
965 pat = GEN_FCN (icode) (target, op[0], op[1]);
966 else
967 pat = GEN_FCN (icode) (op[0], op[1]);
968 break;
969 case 3:
970 if (nonvoid)
971 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
972 else
973 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
974 break;
975 case 4:
976 if (nonvoid)
977 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
978 else
979 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
980 break;
981 case 5:
982 if (nonvoid)
983 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
984 else
985 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
986 break;
987 case 6:
988 if (nonvoid)
989 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
990 else
991 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
992 break;
993 default:
994 gcc_unreachable ();
996 if (!pat)
997 return NULL_RTX;
998 emit_insn (pat);
1000 if (nonvoid)
1001 return target;
1002 else
1003 return const0_rtx;
1007 static const int s390_hotpatch_hw_max = 1000000;
1008 static int s390_hotpatch_hw_before_label = 0;
1009 static int s390_hotpatch_hw_after_label = 0;
1011 /* Check whether the hotpatch attribute is applied to a function and, if it has
1012 an argument, whether the argument is valid. */
1014 static tree
1015 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1016 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1018 tree expr;
1019 tree expr2;
1020 int err;
1022 if (TREE_CODE (*node) != FUNCTION_DECL)
1024 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1025 name);
1026 *no_add_attrs = true;
1028 if (args != NULL && TREE_CHAIN (args) != NULL)
1030 expr = TREE_VALUE (args);
1031 expr2 = TREE_VALUE (TREE_CHAIN (args));
1033 if (args == NULL || TREE_CHAIN (args) == NULL)
1034 err = 1;
1035 else if (TREE_CODE (expr) != INTEGER_CST
1036 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1037 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1038 err = 1;
1039 else if (TREE_CODE (expr2) != INTEGER_CST
1040 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1041 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1042 err = 1;
1043 else
1044 err = 0;
1045 if (err)
1047 error ("requested %qE attribute is not a comma separated pair of"
1048 " non-negative integer constants or too large (max. %d)", name,
1049 s390_hotpatch_hw_max);
1050 *no_add_attrs = true;
1053 return NULL_TREE;
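/* Usage example: a declaration such as
     void f (void) __attribute__ ((hotpatch (1, 8)));
   requests 1 halfword of hotpatching space before and 8 halfwords
   after the function label; values outside 0..s390_hotpatch_hw_max
   are rejected by the checks above.  */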
1056 /* Expand the s390_vector_bool type attribute. */
1058 static tree
1059 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1060 tree args ATTRIBUTE_UNUSED,
1061 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1063 tree type = *node, result = NULL_TREE;
1064 machine_mode mode;
1066 while (POINTER_TYPE_P (type)
1067 || TREE_CODE (type) == FUNCTION_TYPE
1068 || TREE_CODE (type) == METHOD_TYPE
1069 || TREE_CODE (type) == ARRAY_TYPE)
1070 type = TREE_TYPE (type);
1072 mode = TYPE_MODE (type);
1073 switch (mode)
1075 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1076 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1077 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1078 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1079 default: break;
1082 *no_add_attrs = true; /* No need to hang on to the attribute. */
1084 if (result)
1085 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1087 return NULL_TREE;
1090 static const struct attribute_spec s390_attribute_table[] = {
1091 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1092 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1093 /* End element. */
1094 { NULL, 0, 0, false, false, false, NULL, false }
1097 /* Return the alignment for LABEL. We default to the -falign-labels
1098 value except for the literal pool base label. */
1100 s390_label_align (rtx label)
1102 rtx_insn *prev_insn = prev_active_insn (label);
1103 rtx set, src;
1105 if (prev_insn == NULL_RTX)
1106 goto old;
1108 set = single_set (prev_insn);
1110 if (set == NULL_RTX)
1111 goto old;
1113 src = SET_SRC (set);
1115 /* Don't align literal pool base labels. */
1116 if (GET_CODE (src) == UNSPEC
1117 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1118 return 0;
1120 old:
1121 return align_labels_log;
1124 static machine_mode
1125 s390_libgcc_cmp_return_mode (void)
1127 return TARGET_64BIT ? DImode : SImode;
1130 static machine_mode
1131 s390_libgcc_shift_count_mode (void)
1133 return TARGET_64BIT ? DImode : SImode;
1136 static machine_mode
1137 s390_unwind_word_mode (void)
1139 return TARGET_64BIT ? DImode : SImode;
1142 /* Return true if the back end supports mode MODE. */
1143 static bool
1144 s390_scalar_mode_supported_p (machine_mode mode)
1146 /* In contrast to the default implementation reject TImode constants on 31bit
1147 TARGET_ZARCH for ABI compliance. */
1148 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1149 return false;
1151 if (DECIMAL_FLOAT_MODE_P (mode))
1152 return default_decimal_float_supported_p ();
1154 return default_scalar_mode_supported_p (mode);
1157 /* Return true if the back end supports vector mode MODE. */
1158 static bool
1159 s390_vector_mode_supported_p (machine_mode mode)
1161 machine_mode inner;
1163 if (!VECTOR_MODE_P (mode)
1164 || !TARGET_VX
1165 || GET_MODE_SIZE (mode) > 16)
1166 return false;
1168 inner = GET_MODE_INNER (mode);
1170 switch (inner)
1172 case QImode:
1173 case HImode:
1174 case SImode:
1175 case DImode:
1176 case TImode:
1177 case SFmode:
1178 case DFmode:
1179 case TFmode:
1180 return true;
1181 default:
1182 return false;
1186 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1188 void
1189 s390_set_has_landing_pad_p (bool value)
1191 cfun->machine->has_landing_pad_p = value;
1194 /* If two condition code modes are compatible, return a condition code
1195 mode which is compatible with both. Otherwise, return
1196 VOIDmode. */
1198 static machine_mode
1199 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1201 if (m1 == m2)
1202 return m1;
1204 switch (m1)
1206 case CCZmode:
1207 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1208 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1209 return m2;
1210 return VOIDmode;
1212 case CCSmode:
1213 case CCUmode:
1214 case CCTmode:
1215 case CCSRmode:
1216 case CCURmode:
1217 case CCZ1mode:
1218 if (m2 == CCZmode)
1219 return m1;
1221 return VOIDmode;
1223 default:
1224 return VOIDmode;
1226 return VOIDmode;
1229 /* Return true if SET either doesn't set the CC register, or else
1230 the source and destination have matching CC modes and that
1231 CC mode is at least as constrained as REQ_MODE. */
1233 static bool
1234 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1236 machine_mode set_mode;
1238 gcc_assert (GET_CODE (set) == SET);
1240 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1241 return 1;
1243 set_mode = GET_MODE (SET_DEST (set));
1244 switch (set_mode)
1246 case CCSmode:
1247 case CCSRmode:
1248 case CCUmode:
1249 case CCURmode:
1250 case CCLmode:
1251 case CCL1mode:
1252 case CCL2mode:
1253 case CCL3mode:
1254 case CCT1mode:
1255 case CCT2mode:
1256 case CCT3mode:
1257 case CCVEQmode:
1258 case CCVHmode:
1259 case CCVHUmode:
1260 case CCVFHmode:
1261 case CCVFHEmode:
1262 if (req_mode != set_mode)
1263 return 0;
1264 break;
1266 case CCZmode:
1267 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1268 && req_mode != CCSRmode && req_mode != CCURmode)
1269 return 0;
1270 break;
1272 case CCAPmode:
1273 case CCANmode:
1274 if (req_mode != CCAmode)
1275 return 0;
1276 break;
1278 default:
1279 gcc_unreachable ();
1282 return (GET_MODE (SET_SRC (set)) == set_mode);
1285 /* Return true if every SET in INSN that sets the CC register
1286 has source and destination with matching CC modes and that
1287 CC mode is at least as constrained as REQ_MODE.
1288 If REQ_MODE is VOIDmode, always return false. */
1290 bool
1291 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1293 int i;
1295 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1296 if (req_mode == VOIDmode)
1297 return false;
1299 if (GET_CODE (PATTERN (insn)) == SET)
1300 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1302 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1303 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1305 rtx set = XVECEXP (PATTERN (insn), 0, i);
1306 if (GET_CODE (set) == SET)
1307 if (!s390_match_ccmode_set (set, req_mode))
1308 return false;
1311 return true;
1314 /* If a test-under-mask instruction can be used to implement
1315 (compare (and ... OP1) OP2), return the CC mode required
1316 to do that. Otherwise, return VOIDmode.
1317 MIXED is true if the instruction can distinguish between
1318 CC1 and CC2 for mixed selected bits (TMxx); it is false
1319 if the instruction cannot (TM). */
1321 machine_mode
1322 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1324 int bit0, bit1;
1326 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1327 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1328 return VOIDmode;
1330 /* Selected bits all zero: CC0.
1331 e.g.: int a; if ((a & (16 + 128)) == 0) */
1332 if (INTVAL (op2) == 0)
1333 return CCTmode;
1335 /* Selected bits all one: CC3.
1336 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1337 if (INTVAL (op2) == INTVAL (op1))
1338 return CCT3mode;
1340 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1341 int a;
1342 if ((a & (16 + 128)) == 16) -> CCT1
1343 if ((a & (16 + 128)) == 128) -> CCT2 */
1344 if (mixed)
1346 bit1 = exact_log2 (INTVAL (op2));
1347 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1348 if (bit0 != -1 && bit1 != -1)
1349 return bit0 > bit1 ? CCT1mode : CCT2mode;
1352 return VOIDmode;
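/* Worked example for the mixed case above: for (a & (16 + 128)) == 16
   we get op1 = 0x90 and op2 = 0x10, hence bit1 = 4 and
   bit0 = exact_log2 (0x80) = 7; bit0 > bit1 selects CCT1mode, matching
   the comment above.  */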
1355 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1356 OP0 and OP1 of a COMPARE, return the mode to be used for the
1357 comparison. */
1359 machine_mode
1360 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1362 if (TARGET_VX
1363 && register_operand (op0, DFmode)
1364 && register_operand (op1, DFmode))
1366 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1367 s390_emit_compare or s390_canonicalize_comparison will take
1368 care of it. */
1369 switch (code)
1371 case EQ:
1372 case NE:
1373 return CCVEQmode;
1374 case GT:
1375 case UNLE:
1376 return CCVFHmode;
1377 case GE:
1378 case UNLT:
1379 return CCVFHEmode;
1380 default:
1385 switch (code)
1387 case EQ:
1388 case NE:
1389 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1390 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1391 return CCAPmode;
1392 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1393 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1394 return CCAPmode;
1395 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1396 || GET_CODE (op1) == NEG)
1397 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1398 return CCLmode;
1400 if (GET_CODE (op0) == AND)
1402 /* Check whether we can potentially do it via TM. */
1403 machine_mode ccmode;
1404 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1405 if (ccmode != VOIDmode)
1407 /* Relax CCTmode to CCZmode to allow fall-back to AND
1408 if that turns out to be beneficial. */
1409 return ccmode == CCTmode ? CCZmode : ccmode;
1413 if (register_operand (op0, HImode)
1414 && GET_CODE (op1) == CONST_INT
1415 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1416 return CCT3mode;
1417 if (register_operand (op0, QImode)
1418 && GET_CODE (op1) == CONST_INT
1419 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1420 return CCT3mode;
1422 return CCZmode;
1424 case LE:
1425 case LT:
1426 case GE:
1427 case GT:
1428 /* The only overflow condition of NEG and ABS happens when INT_MIN
1429 is used as parameter: the mathematically positive result cannot
1430 be represented and stays negative.
1431 Using CCAP mode the resulting cc can be used for comparisons. */
1432 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1433 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1434 return CCAPmode;
1436 /* If constants are involved in an add instruction it is possible to use
1437 the resulting cc for comparisons with zero. Knowing the sign of the
1438 constant the overflow behavior gets predictable. e.g.:
1439 int a, b; if ((b = a + c) > 0)
1440 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1441 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1442 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1443 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1444 /* Avoid INT32_MIN on 32 bit. */
1445 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1447 if (INTVAL (XEXP((op0), 1)) < 0)
1448 return CCANmode;
1449 else
1450 return CCAPmode;
1452 /* Fall through. */
1453 case UNORDERED:
1454 case ORDERED:
1455 case UNEQ:
1456 case UNLE:
1457 case UNLT:
1458 case UNGE:
1459 case UNGT:
1460 case LTGT:
1461 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1462 && GET_CODE (op1) != CONST_INT)
1463 return CCSRmode;
1464 return CCSmode;
1466 case LTU:
1467 case GEU:
1468 if (GET_CODE (op0) == PLUS
1469 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1470 return CCL1mode;
1472 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1473 && GET_CODE (op1) != CONST_INT)
1474 return CCURmode;
1475 return CCUmode;
1477 case LEU:
1478 case GTU:
1479 if (GET_CODE (op0) == MINUS
1480 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1481 return CCL2mode;
1483 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1484 && GET_CODE (op1) != CONST_INT)
1485 return CCURmode;
1486 return CCUmode;
1488 default:
1489 gcc_unreachable ();
1493 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1494 that we can implement more efficiently. */
1496 static void
1497 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1498 bool op0_preserve_value)
1500 if (op0_preserve_value)
1501 return;
1503 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1504 if ((*code == EQ || *code == NE)
1505 && *op1 == const0_rtx
1506 && GET_CODE (*op0) == ZERO_EXTRACT
1507 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1508 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1509 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1511 rtx inner = XEXP (*op0, 0);
1512 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1513 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1514 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1516 if (len > 0 && len < modesize
1517 && pos >= 0 && pos + len <= modesize
1518 && modesize <= HOST_BITS_PER_WIDE_INT)
1520 unsigned HOST_WIDE_INT block;
1521 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1522 block <<= modesize - pos - len;
1524 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1525 gen_int_mode (block, GET_MODE (inner)));
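/* E.g. a ZERO_EXTRACT of 2 bits at bit position 4 from an SImode value
   compared against zero becomes an AND with the mask 0x0c000000
   (block 3 shifted left by 32 - 4 - 2 = 26 bits).  */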
1529 /* Narrow AND of memory against immediate to enable TM. */
1530 if ((*code == EQ || *code == NE)
1531 && *op1 == const0_rtx
1532 && GET_CODE (*op0) == AND
1533 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1534 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1536 rtx inner = XEXP (*op0, 0);
1537 rtx mask = XEXP (*op0, 1);
1539 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1540 if (GET_CODE (inner) == SUBREG
1541 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1542 && (GET_MODE_SIZE (GET_MODE (inner))
1543 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1544 && ((INTVAL (mask)
1545 & GET_MODE_MASK (GET_MODE (inner))
1546 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1547 == 0))
1548 inner = SUBREG_REG (inner);
1550 /* Do not change volatile MEMs. */
1551 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1553 int part = s390_single_part (XEXP (*op0, 1),
1554 GET_MODE (inner), QImode, 0);
1555 if (part >= 0)
1557 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1558 inner = adjust_address_nv (inner, QImode, part);
1559 *op0 = gen_rtx_AND (QImode, inner, mask);
1564 /* Narrow comparisons against 0xffff to HImode if possible. */
1565 if ((*code == EQ || *code == NE)
1566 && GET_CODE (*op1) == CONST_INT
1567 && INTVAL (*op1) == 0xffff
1568 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1569 && (nonzero_bits (*op0, GET_MODE (*op0))
1570 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1572 *op0 = gen_lowpart (HImode, *op0);
1573 *op1 = constm1_rtx;
1576 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1577 if (GET_CODE (*op0) == UNSPEC
1578 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1579 && XVECLEN (*op0, 0) == 1
1580 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1581 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1582 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1583 && *op1 == const0_rtx)
1585 enum rtx_code new_code = UNKNOWN;
1586 switch (*code)
1588 case EQ: new_code = EQ; break;
1589 case NE: new_code = NE; break;
1590 case LT: new_code = GTU; break;
1591 case GT: new_code = LTU; break;
1592 case LE: new_code = GEU; break;
1593 case GE: new_code = LEU; break;
1594 default: break;
1597 if (new_code != UNKNOWN)
1599 *op0 = XVECEXP (*op0, 0, 0);
1600 *code = new_code;
1604 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1605 if (GET_CODE (*op0) == UNSPEC
1606 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1607 && XVECLEN (*op0, 0) == 1
1608 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1609 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1610 && CONST_INT_P (*op1))
1612 enum rtx_code new_code = UNKNOWN;
1613 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1615 case CCZmode:
1616 case CCRAWmode:
1617 switch (*code)
1619 case EQ: new_code = EQ; break;
1620 case NE: new_code = NE; break;
1621 default: break;
1623 break;
1624 default: break;
1627 if (new_code != UNKNOWN)
1629 /* For CCRAWmode put the required cc mask into the second
1630 operand. */
1631 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1632 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1633 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1634 *op0 = XVECEXP (*op0, 0, 0);
1635 *code = new_code;
1639 /* Simplify cascaded EQ, NE with const0_rtx. */
1640 if ((*code == NE || *code == EQ)
1641 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1642 && GET_MODE (*op0) == SImode
1643 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1644 && REG_P (XEXP (*op0, 0))
1645 && XEXP (*op0, 1) == const0_rtx
1646 && *op1 == const0_rtx)
1648 if ((*code == EQ && GET_CODE (*op0) == NE)
1649 || (*code == NE && GET_CODE (*op0) == EQ))
1650 *code = EQ;
1651 else
1652 *code = NE;
1653 *op0 = XEXP (*op0, 0);
1656 /* Prefer register over memory as first operand. */
1657 if (MEM_P (*op0) && REG_P (*op1))
1659 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1660 *code = (int)swap_condition ((enum rtx_code)*code);
1663 /* Using the scalar variants of vector instructions for 64 bit FP
1664 comparisons might require swapping the operands. */
1665 if (TARGET_VX
1666 && register_operand (*op0, DFmode)
1667 && register_operand (*op1, DFmode)
1668 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1670 rtx tmp;
1672 switch (*code)
1674 case LT: *code = GT; break;
1675 case LE: *code = GE; break;
1676 case UNGT: *code = UNLE; break;
1677 case UNGE: *code = UNLT; break;
1678 default: ;
1680 tmp = *op0; *op0 = *op1; *op1 = tmp;
1684 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1685 FP compare using the single element variant of vector instructions.
1686 Replace CODE with the comparison code to be used in the CC reg
1687 compare and return the condition code register RTX in CC. */
1689 static bool
1690 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1691 rtx *cc)
1693 machine_mode cmp_mode;
1694 bool swap_p = false;
1696 switch (*code)
1698 case EQ: cmp_mode = CCVEQmode; break;
1699 case NE: cmp_mode = CCVEQmode; break;
1700 case GT: cmp_mode = CCVFHmode; break;
1701 case GE: cmp_mode = CCVFHEmode; break;
1702 case UNLE: cmp_mode = CCVFHmode; break;
1703 case UNLT: cmp_mode = CCVFHEmode; break;
1704 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1705 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1706 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1707 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1708 default: return false;
1711 if (swap_p)
1713 rtx tmp = cmp2;
1714 cmp2 = cmp1;
1715 cmp1 = tmp;
1717 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1718 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1719 gen_rtvec (2,
1720 gen_rtx_SET (*cc,
1721 gen_rtx_COMPARE (cmp_mode, cmp1,
1722 cmp2)),
1723 gen_rtx_CLOBBER (VOIDmode,
1724 gen_rtx_SCRATCH (V2DImode)))));
1725 return true;
1729 /* Emit a compare instruction suitable to implement the comparison
1730 OP0 CODE OP1. Return the correct condition RTL to be placed in
1731 the IF_THEN_ELSE of the conditional branch testing the result. */
1734 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1736 machine_mode mode = s390_select_ccmode (code, op0, op1);
1737 rtx cc;
1739 if (TARGET_VX
1740 && register_operand (op0, DFmode)
1741 && register_operand (op1, DFmode)
1742 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1744 /* Work has been done by s390_expand_vec_compare_scalar already. */
1746 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1748 /* Do not output a redundant compare instruction if a
1749 compare_and_swap pattern already computed the result and the
1750 machine modes are compatible. */
1751 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1752 == GET_MODE (op0));
1753 cc = op0;
1755 else
1757 cc = gen_rtx_REG (mode, CC_REGNUM);
1758 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1761 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1764 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1765 matches CMP.
1766 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1767 conditional branch testing the result. */
1769 static rtx
1770 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1771 rtx cmp, rtx new_rtx)
1773 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1774 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1775 const0_rtx);
1778 /* Emit a jump instruction to TARGET and return it. If COND is
1779 NULL_RTX, emit an unconditional jump, else a conditional jump under
1780 condition COND. */
1782 rtx_insn *
1783 s390_emit_jump (rtx target, rtx cond)
1785 rtx insn;
1787 target = gen_rtx_LABEL_REF (VOIDmode, target);
1788 if (cond)
1789 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1791 insn = gen_rtx_SET (pc_rtx, target);
1792 return emit_jump_insn (insn);
1795 /* Return branch condition mask to implement a branch
1796 specified by CODE. Return -1 for invalid comparisons. */
1799 s390_branch_condition_mask (rtx code)
1801 const int CC0 = 1 << 3;
1802 const int CC1 = 1 << 2;
1803 const int CC2 = 1 << 1;
1804 const int CC3 = 1 << 0;
1806 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1807 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1808 gcc_assert (XEXP (code, 1) == const0_rtx
1809 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1810 && CONST_INT_P (XEXP (code, 1))));
1813 switch (GET_MODE (XEXP (code, 0)))
1815 case CCZmode:
1816 case CCZ1mode:
1817 switch (GET_CODE (code))
1819 case EQ: return CC0;
1820 case NE: return CC1 | CC2 | CC3;
1821 default: return -1;
1823 break;
1825 case CCT1mode:
1826 switch (GET_CODE (code))
1828 case EQ: return CC1;
1829 case NE: return CC0 | CC2 | CC3;
1830 default: return -1;
1832 break;
1834 case CCT2mode:
1835 switch (GET_CODE (code))
1837 case EQ: return CC2;
1838 case NE: return CC0 | CC1 | CC3;
1839 default: return -1;
1841 break;
1843 case CCT3mode:
1844 switch (GET_CODE (code))
1846 case EQ: return CC3;
1847 case NE: return CC0 | CC1 | CC2;
1848 default: return -1;
1850 break;
1852 case CCLmode:
1853 switch (GET_CODE (code))
1855 case EQ: return CC0 | CC2;
1856 case NE: return CC1 | CC3;
1857 default: return -1;
1859 break;
1861 case CCL1mode:
1862 switch (GET_CODE (code))
1864 case LTU: return CC2 | CC3; /* carry */
1865 case GEU: return CC0 | CC1; /* no carry */
1866 default: return -1;
1868 break;
1870 case CCL2mode:
1871 switch (GET_CODE (code))
1873 case GTU: return CC0 | CC1; /* borrow */
1874 case LEU: return CC2 | CC3; /* no borrow */
1875 default: return -1;
1877 break;
1879 case CCL3mode:
1880 switch (GET_CODE (code))
1882 case EQ: return CC0 | CC2;
1883 case NE: return CC1 | CC3;
1884 case LTU: return CC1;
1885 case GTU: return CC3;
1886 case LEU: return CC1 | CC2;
1887 case GEU: return CC2 | CC3;
1888 default: return -1;
1891 case CCUmode:
1892 switch (GET_CODE (code))
1894 case EQ: return CC0;
1895 case NE: return CC1 | CC2 | CC3;
1896 case LTU: return CC1;
1897 case GTU: return CC2;
1898 case LEU: return CC0 | CC1;
1899 case GEU: return CC0 | CC2;
1900 default: return -1;
1902 break;
1904 case CCURmode:
1905 switch (GET_CODE (code))
1907 case EQ: return CC0;
1908 case NE: return CC2 | CC1 | CC3;
1909 case LTU: return CC2;
1910 case GTU: return CC1;
1911 case LEU: return CC0 | CC2;
1912 case GEU: return CC0 | CC1;
1913 default: return -1;
1915 break;
1917 case CCAPmode:
1918 switch (GET_CODE (code))
1920 case EQ: return CC0;
1921 case NE: return CC1 | CC2 | CC3;
1922 case LT: return CC1 | CC3;
1923 case GT: return CC2;
1924 case LE: return CC0 | CC1 | CC3;
1925 case GE: return CC0 | CC2;
1926 default: return -1;
1928 break;
1930 case CCANmode:
1931 switch (GET_CODE (code))
1933 case EQ: return CC0;
1934 case NE: return CC1 | CC2 | CC3;
1935 case LT: return CC1;
1936 case GT: return CC2 | CC3;
1937 case LE: return CC0 | CC1;
1938 case GE: return CC0 | CC2 | CC3;
1939 default: return -1;
1941 break;
1943 case CCSmode:
1944 switch (GET_CODE (code))
1946 case EQ: return CC0;
1947 case NE: return CC1 | CC2 | CC3;
1948 case LT: return CC1;
1949 case GT: return CC2;
1950 case LE: return CC0 | CC1;
1951 case GE: return CC0 | CC2;
1952 case UNORDERED: return CC3;
1953 case ORDERED: return CC0 | CC1 | CC2;
1954 case UNEQ: return CC0 | CC3;
1955 case UNLT: return CC1 | CC3;
1956 case UNGT: return CC2 | CC3;
1957 case UNLE: return CC0 | CC1 | CC3;
1958 case UNGE: return CC0 | CC2 | CC3;
1959 case LTGT: return CC1 | CC2;
1960 default: return -1;
1962 break;
1964 case CCSRmode:
1965 switch (GET_CODE (code))
1967 case EQ: return CC0;
1968 case NE: return CC2 | CC1 | CC3;
1969 case LT: return CC2;
1970 case GT: return CC1;
1971 case LE: return CC0 | CC2;
1972 case GE: return CC0 | CC1;
1973 case UNORDERED: return CC3;
1974 case ORDERED: return CC0 | CC2 | CC1;
1975 case UNEQ: return CC0 | CC3;
1976 case UNLT: return CC2 | CC3;
1977 case UNGT: return CC1 | CC3;
1978 case UNLE: return CC0 | CC2 | CC3;
1979 case UNGE: return CC0 | CC1 | CC3;
1980 case LTGT: return CC2 | CC1;
1981 default: return -1;
1983 break;
1985 /* Vector comparison modes. */
1987 case CCVEQmode:
1988 switch (GET_CODE (code))
1990 case EQ: return CC0;
1991 case NE: return CC3;
1992 default: return -1;
1995 case CCVEQANYmode:
1996 switch (GET_CODE (code))
1998 case EQ: return CC0 | CC1;
1999 case NE: return CC3 | CC1;
2000 default: return -1;
2003 /* Integer vector compare modes. */
2005 case CCVHmode:
2006 switch (GET_CODE (code))
2008 case GT: return CC0;
2009 case LE: return CC3;
2010 default: return -1;
2013 case CCVHANYmode:
2014 switch (GET_CODE (code))
2016 case GT: return CC0 | CC1;
2017 case LE: return CC3 | CC1;
2018 default: return -1;
2021 case CCVHUmode:
2022 switch (GET_CODE (code))
2024 case GTU: return CC0;
2025 case LEU: return CC3;
2026 default: return -1;
2029 case CCVHUANYmode:
2030 switch (GET_CODE (code))
2032 case GTU: return CC0 | CC1;
2033 case LEU: return CC3 | CC1;
2034 default: return -1;
2037 /* FP vector compare modes. */
2039 case CCVFHmode:
2040 switch (GET_CODE (code))
2042 case GT: return CC0;
2043 case UNLE: return CC3;
2044 default: return -1;
2047 case CCVFHANYmode:
2048 switch (GET_CODE (code))
2050 case GT: return CC0 | CC1;
2051 case UNLE: return CC3 | CC1;
2052 default: return -1;
2055 case CCVFHEmode:
2056 switch (GET_CODE (code))
2058 case GE: return CC0;
2059 case UNLT: return CC3;
2060 default: return -1;
2063 case CCVFHEANYmode:
2064 switch (GET_CODE (code))
2066 case GE: return CC0 | CC1;
2067 case UNLT: return CC3 | CC1;
2068 default: return -1;
2072 case CCRAWmode:
2073 switch (GET_CODE (code))
2075 case EQ:
2076 return INTVAL (XEXP (code, 1));
2077 case NE:
2078 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2079 default:
2080 gcc_unreachable ();
2083 default:
2084 return -1;
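/* A worked example: in CCSmode an LE comparison maps to CC0 | CC1,
   i.e. a 4-bit mask of 12, while UNORDERED maps to CC3 alone, mask 1.  */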
2089 /* Return branch condition mask to implement a compare and branch
2090 specified by CODE. Return -1 for invalid comparisons. */
2093 s390_compare_and_branch_condition_mask (rtx code)
2095 const int CC0 = 1 << 3;
2096 const int CC1 = 1 << 2;
2097 const int CC2 = 1 << 1;
2099 switch (GET_CODE (code))
2101 case EQ:
2102 return CC0;
2103 case NE:
2104 return CC1 | CC2;
2105 case LT:
2106 case LTU:
2107 return CC1;
2108 case GT:
2109 case GTU:
2110 return CC2;
2111 case LE:
2112 case LEU:
2113 return CC0 | CC1;
2114 case GE:
2115 case GEU:
2116 return CC0 | CC2;
2117 default:
2118 gcc_unreachable ();
2120 return -1;
2123 /* If INV is false, return assembler mnemonic string to implement
2124 a branch specified by CODE. If INV is true, return mnemonic
2125 for the corresponding inverted branch. */
2127 static const char *
2128 s390_branch_condition_mnemonic (rtx code, int inv)
2130 int mask;
2132 static const char *const mnemonic[16] =
2134 NULL, "o", "h", "nle",
2135 "l", "nhe", "lh", "ne",
2136 "e", "nlh", "he", "nl",
2137 "le", "nh", "no", NULL
2140 if (GET_CODE (XEXP (code, 0)) == REG
2141 && REGNO (XEXP (code, 0)) == CC_REGNUM
2142 && (XEXP (code, 1) == const0_rtx
2143 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2144 && CONST_INT_P (XEXP (code, 1)))))
2145 mask = s390_branch_condition_mask (code);
2146 else
2147 mask = s390_compare_and_branch_condition_mask (code);
2149 gcc_assert (mask >= 0);
2151 if (inv)
2152 mask ^= 15;
2154 gcc_assert (mask >= 1 && mask <= 14);
2156 return mnemonic[mask];
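/* A worked example: an (eq (reg CC) (const_int 0)) comparison in CCSmode
   yields mask CC0 == 8, so the mnemonic returned is "e"; with INV set the
   mask becomes 8 ^ 15 == 7, which selects "ne".  */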
2159 /* Return the part of op which has a value different from def.
2160 The size of the part is determined by mode.
2161 Use this function only if you already know that op really
2162 contains such a part. */
2164 unsigned HOST_WIDE_INT
2165 s390_extract_part (rtx op, machine_mode mode, int def)
2167 unsigned HOST_WIDE_INT value = 0;
2168 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2169 int part_bits = GET_MODE_BITSIZE (mode);
2170 unsigned HOST_WIDE_INT part_mask
2171 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2172 int i;
2174 for (i = 0; i < max_parts; i++)
2176 if (i == 0)
2177 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2178 else
2179 value >>= part_bits;
2181 if ((value & part_mask) != (def & part_mask))
2182 return value & part_mask;
2185 gcc_unreachable ();
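/* For instance, s390_extract_part (GEN_INT (0x12340000), HImode, 0) scans
   the HImode chunks starting at the low end; the first chunk equals DEF,
   the second does not, so the value 0x1234 is returned.  */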
2188 /* If OP is an integer constant of mode MODE with exactly one
2189 part of mode PART_MODE unequal to DEF, return the number of that
2190 part. Otherwise, return -1. */
2193 s390_single_part (rtx op,
2194 machine_mode mode,
2195 machine_mode part_mode,
2196 int def)
2198 unsigned HOST_WIDE_INT value = 0;
2199 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2200 unsigned HOST_WIDE_INT part_mask
2201 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2202 int i, part = -1;
2204 if (GET_CODE (op) != CONST_INT)
2205 return -1;
2207 for (i = 0; i < n_parts; i++)
2209 if (i == 0)
2210 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2211 else
2212 value >>= GET_MODE_BITSIZE (part_mode);
2214 if ((value & part_mask) != (def & part_mask))
2216 if (part != -1)
2217 return -1;
2218 else
2219 part = i;
2222 return part == -1 ? -1 : n_parts - 1 - part;
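/* For instance, s390_single_part (GEN_INT (0xffff), DImode, SImode, 0)
   finds that only the low SImode word differs from DEF and returns 1;
   the part number is counted from the most significant part.  */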
2225 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2226 bits and no other bits are set in IN. POS and LENGTH can be used
2227 to obtain the start position and the length of the bitfield.
2229 POS gives the position of the first bit of the bitfield counting
2230 from the lowest order bit starting with zero. In order to use this
2231 value for S/390 instructions this has to be converted to "bits big
2232 endian" style. */
2234 bool
2235 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
2236 int *pos, int *length)
2238 int tmp_pos = 0;
2239 int tmp_length = 0;
2240 int i;
2241 unsigned HOST_WIDE_INT mask = 1ULL;
2242 bool contiguous = false;
2244 for (i = 0; i < size; mask <<= 1, i++)
2246 if (contiguous)
2248 if (mask & in)
2249 tmp_length++;
2250 else
2251 break;
2253 else
2255 if (mask & in)
2257 contiguous = true;
2258 tmp_length++;
2260 else
2261 tmp_pos++;
2265 if (!tmp_length)
2266 return false;
2268 /* Calculate a mask for all bits beyond the contiguous bits. */
2269 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
2271 if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
2272 mask &= (HOST_WIDE_INT_1U << size) - 1;
2274 if (mask & in)
2275 return false;
2277 if (tmp_length + tmp_pos - 1 > size)
2278 return false;
2280 if (length)
2281 *length = tmp_length;
2283 if (pos)
2284 *pos = tmp_pos;
2286 return true;
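/* For instance, s390_contiguous_bitmask_p (0x0ff0, 64, &pos, &len) returns
   true with pos == 4 and len == 8; POS counts from the least significant
   bit and still needs the "bits big endian" conversion before it can be
   used in an instruction.  */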
2289 /* Return true if OP contains the same contiguous bitfield in *all*
2290 its elements. START and END can be used to obtain the start and
2291 end position of the bitfield.
2293 START/END give the position of the first/last bit of the bitfield
2294 counting from the lowest order bit starting with zero. In order to
2295 use these values for S/390 instructions this has to be converted to
2296 "bits big endian" style. */
2298 bool
2299 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2301 unsigned HOST_WIDE_INT mask;
2302 int length, size;
2303 rtx elt;
2305 if (!const_vec_duplicate_p (op, &elt)
2306 || !CONST_INT_P (elt))
2307 return false;
2309 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2311 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2312 if (size > 64)
2313 return false;
2315 mask = UINTVAL (elt);
2316 if (s390_contiguous_bitmask_p (mask, size, start,
2317 end != NULL ? &length : NULL))
2319 if (end != NULL)
2320 *end = *start + length - 1;
2321 return true;
2323 /* 0xff00000f style immediates can be covered by swapping start and
2324 end indices in vgm. */
2325 if (s390_contiguous_bitmask_p (~mask, size, start,
2326 end != NULL ? &length : NULL))
2328 if (end != NULL)
2329 *end = *start - 1;
2330 if (start != NULL)
2331 *start = *start + length;
2332 return true;
2334 return false;
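/* As an example, a V4SI vector replicating 0x000ff000 gives *start == 12
   and *end == 19, whereas replicating 0xff00000f exercises the swapped
   case: its complement 0x00fffff0 is contiguous, so *start == 24 and
   *end == 3 describe the wrap-around mask for vgm.  */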
2337 /* Return true if OP consists only of byte chunks that are either 0 or
2338 0xff. If MASK is != NULL a byte mask is generated which is
2339 appropriate for the vector generate byte mask instruction. */
2341 bool
2342 s390_bytemask_vector_p (rtx op, unsigned *mask)
2344 int i;
2345 unsigned tmp_mask = 0;
2346 int nunit, unit_size;
2348 if (!VECTOR_MODE_P (GET_MODE (op))
2349 || GET_CODE (op) != CONST_VECTOR
2350 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2351 return false;
2353 nunit = GET_MODE_NUNITS (GET_MODE (op));
2354 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2356 for (i = 0; i < nunit; i++)
2358 unsigned HOST_WIDE_INT c;
2359 int j;
2361 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2362 return false;
2364 c = UINTVAL (XVECEXP (op, 0, i));
2365 for (j = 0; j < unit_size; j++)
2367 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2368 return false;
2369 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2370 c = c >> BITS_PER_UNIT;
2374 if (mask != NULL)
2375 *mask = tmp_mask;
2377 return true;
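/* As an example, a V16QI constant whose first byte is 0xff and whose
   remaining bytes are zero yields *mask == 0x8000; element 0 maps to the
   most significant bit of the vector generate byte mask operand.  */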
2380 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2381 equivalent to a shift followed by the AND. In particular, CONTIG
2382 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2383 for ROTL indicate a rotate to the right. */
2385 bool
2386 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2388 int pos, len;
2389 bool ok;
2391 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
2392 gcc_assert (ok);
2394 return ((rotl >= 0 && rotl <= pos)
2395 || (rotl < 0 && -rotl <= bitsize - len - pos));
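/* As an example, with BITSIZE == 32 and CONTIG == 0x0000ff00 (pos 8,
   len 8) any left rotate of at most 8 and any right rotate of at most 16
   keeps the field clear of the wrap-around gap, so true is returned for
   those values of ROTL.  */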
2398 /* Check whether we can (and want to) split a double-word
2399 move in mode MODE from SRC to DST into two single-word
2400 moves, moving the subword FIRST_SUBWORD first. */
2402 bool
2403 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2405 /* Floating point and vector registers cannot be split. */
2406 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2407 return false;
2409 /* We don't need to split if operands are directly accessible. */
2410 if (s_operand (src, mode) || s_operand (dst, mode))
2411 return false;
2413 /* Non-offsettable memory references cannot be split. */
2414 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2415 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2416 return false;
2418 /* Moving the first subword must not clobber a register
2419 needed to move the second subword. */
2420 if (register_operand (dst, mode))
2422 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2423 if (reg_overlap_mentioned_p (subreg, src))
2424 return false;
2427 return true;
2430 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2431 and [MEM2, MEM2 + SIZE] overlap, and false
2432 otherwise. */
2434 bool
2435 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2437 rtx addr1, addr2, addr_delta;
2438 HOST_WIDE_INT delta;
2440 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2441 return true;
2443 if (size == 0)
2444 return false;
2446 addr1 = XEXP (mem1, 0);
2447 addr2 = XEXP (mem2, 0);
2449 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2451 /* This overlapping check is used by peepholes merging memory block operations.
2452 Overlapping operations would otherwise be recognized by the S/390 hardware
2453 and would fall back to a slower implementation. Allowing overlapping
2454 operations would lead to slow code but not to wrong code. Therefore we are
2455 somewhat optimistic if we cannot prove that the memory blocks are
2456 overlapping.
2457 That's why we return false here although this may accept operations on
2458 overlapping memory areas. */
2459 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2460 return false;
2462 delta = INTVAL (addr_delta);
2464 if (delta == 0
2465 || (delta > 0 && delta < size)
2466 || (delta < 0 && -delta < size))
2467 return true;
2469 return false;
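/* For instance, two MEMs addressed as (reg) and (plus (reg) (const_int 8))
   with SIZE == 16 give a delta of 8 and the function returns true; with an
   offset of 16, or with a delta that cannot be computed at compile time,
   it returns false.  */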
2472 /* Check whether the address of memory reference MEM2 equals exactly
2473 the address of memory reference MEM1 plus DELTA. Return true if
2474 we can prove this to be the case, false otherwise. */
2476 bool
2477 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2479 rtx addr1, addr2, addr_delta;
2481 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2482 return false;
2484 addr1 = XEXP (mem1, 0);
2485 addr2 = XEXP (mem2, 0);
2487 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2488 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2489 return false;
2491 return true;
2494 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2496 void
2497 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2498 rtx *operands)
2500 machine_mode wmode = mode;
2501 rtx dst = operands[0];
2502 rtx src1 = operands[1];
2503 rtx src2 = operands[2];
2504 rtx op, clob, tem;
2506 /* If we cannot handle the operation directly, use a temp register. */
2507 if (!s390_logical_operator_ok_p (operands))
2508 dst = gen_reg_rtx (mode);
2510 /* QImode and HImode patterns make sense only if we have a destination
2511 in memory. Otherwise perform the operation in SImode. */
2512 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2513 wmode = SImode;
2515 /* Widen operands if required. */
2516 if (mode != wmode)
2518 if (GET_CODE (dst) == SUBREG
2519 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2520 dst = tem;
2521 else if (REG_P (dst))
2522 dst = gen_rtx_SUBREG (wmode, dst, 0);
2523 else
2524 dst = gen_reg_rtx (wmode);
2526 if (GET_CODE (src1) == SUBREG
2527 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2528 src1 = tem;
2529 else if (GET_MODE (src1) != VOIDmode)
2530 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2532 if (GET_CODE (src2) == SUBREG
2533 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2534 src2 = tem;
2535 else if (GET_MODE (src2) != VOIDmode)
2536 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2539 /* Emit the instruction. */
2540 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2541 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2542 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2544 /* Fix up the destination if needed. */
2545 if (dst != operands[0])
2546 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2549 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2551 bool
2552 s390_logical_operator_ok_p (rtx *operands)
2554 /* If the destination operand is in memory, it needs to coincide
2555 with one of the source operands. After reload, it has to be
2556 the first source operand. */
2557 if (GET_CODE (operands[0]) == MEM)
2558 return rtx_equal_p (operands[0], operands[1])
2559 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2561 return true;
2564 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2565 operand IMMOP to switch from SS to SI type instructions. */
2567 void
2568 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2570 int def = code == AND ? -1 : 0;
2571 HOST_WIDE_INT mask;
2572 int part;
2574 gcc_assert (GET_CODE (*memop) == MEM);
2575 gcc_assert (!MEM_VOLATILE_P (*memop));
2577 mask = s390_extract_part (*immop, QImode, def);
2578 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2579 gcc_assert (part >= 0);
2581 *memop = adjust_address (*memop, QImode, part);
2582 *immop = gen_int_mode (mask, QImode);
2586 /* How to allocate a 'struct machine_function'. */
2588 static struct machine_function *
2589 s390_init_machine_status (void)
2591 return ggc_cleared_alloc<machine_function> ();
2594 /* Map for smallest class containing reg regno. */
2596 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2597 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2598 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2599 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2600 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2601 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2602 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2603 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2604 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2605 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2606 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2607 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2608 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2609 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2610 VEC_REGS, VEC_REGS /* 52 */
2613 /* Return attribute type of insn. */
2615 static enum attr_type
2616 s390_safe_attr_type (rtx_insn *insn)
2618 if (recog_memoized (insn) >= 0)
2619 return get_attr_type (insn);
2620 else
2621 return TYPE_NONE;
2624 /* Return true if DISP is a valid short displacement. */
2626 static bool
2627 s390_short_displacement (rtx disp)
2629 /* No displacement is OK. */
2630 if (!disp)
2631 return true;
2633 /* Without the long displacement facility we don't need to
2634 distinguish between long and short displacements. */
2635 if (!TARGET_LONG_DISPLACEMENT)
2636 return true;
2638 /* Integer displacement in range. */
2639 if (GET_CODE (disp) == CONST_INT)
2640 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2642 /* GOT offset is not OK, the GOT can be large. */
2643 if (GET_CODE (disp) == CONST
2644 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2645 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2646 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2647 return false;
2649 /* All other symbolic constants are literal pool references,
2650 which are OK as the literal pool must be small. */
2651 if (GET_CODE (disp) == CONST)
2652 return true;
2654 return false;
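/* For instance, with the long-displacement facility enabled a displacement
   of 40 counts as short, while 4096 or -8 do not and require the 20-bit
   signed displacement forms.  */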
2657 /* Decompose a RTL expression ADDR for a memory address into
2658 its components, returned in OUT.
2660 Returns false if ADDR is not a valid memory address, true
2661 otherwise. If OUT is NULL, don't return the components,
2662 but check for validity only.
2664 Note: Only addresses in canonical form are recognized.
2665 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2666 canonical form so that they will be recognized. */
2668 static int
2669 s390_decompose_address (rtx addr, struct s390_address *out)
2671 HOST_WIDE_INT offset = 0;
2672 rtx base = NULL_RTX;
2673 rtx indx = NULL_RTX;
2674 rtx disp = NULL_RTX;
2675 rtx orig_disp;
2676 bool pointer = false;
2677 bool base_ptr = false;
2678 bool indx_ptr = false;
2679 bool literal_pool = false;
2681 /* We may need to substitute the literal pool base register into the address
2682 below. However, at this point we do not know which register is going to
2683 be used as base, so we substitute the arg pointer register. This is going
2684 to be treated as holding a pointer below -- it shouldn't be used for any
2685 other purpose. */
2686 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2688 /* Decompose address into base + index + displacement. */
2690 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2691 base = addr;
2693 else if (GET_CODE (addr) == PLUS)
2695 rtx op0 = XEXP (addr, 0);
2696 rtx op1 = XEXP (addr, 1);
2697 enum rtx_code code0 = GET_CODE (op0);
2698 enum rtx_code code1 = GET_CODE (op1);
2700 if (code0 == REG || code0 == UNSPEC)
2702 if (code1 == REG || code1 == UNSPEC)
2704 indx = op0; /* index + base */
2705 base = op1;
2708 else
2710 base = op0; /* base + displacement */
2711 disp = op1;
2715 else if (code0 == PLUS)
2717 indx = XEXP (op0, 0); /* index + base + disp */
2718 base = XEXP (op0, 1);
2719 disp = op1;
2722 else
2724 return false;
2728 else
2729 disp = addr; /* displacement */
2731 /* Extract integer part of displacement. */
2732 orig_disp = disp;
2733 if (disp)
2735 if (GET_CODE (disp) == CONST_INT)
2737 offset = INTVAL (disp);
2738 disp = NULL_RTX;
2740 else if (GET_CODE (disp) == CONST
2741 && GET_CODE (XEXP (disp, 0)) == PLUS
2742 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2744 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2745 disp = XEXP (XEXP (disp, 0), 0);
2749 /* Strip off CONST here to avoid special case tests later. */
2750 if (disp && GET_CODE (disp) == CONST)
2751 disp = XEXP (disp, 0);
2753 /* We can convert literal pool addresses to
2754 displacements by basing them off the base register. */
2755 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2757 /* Either base or index must be free to hold the base register. */
2758 if (!base)
2759 base = fake_pool_base, literal_pool = true;
2760 else if (!indx)
2761 indx = fake_pool_base, literal_pool = true;
2762 else
2763 return false;
2765 /* Mark up the displacement. */
2766 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2767 UNSPEC_LTREL_OFFSET);
2770 /* Validate base register. */
2771 if (base)
2773 if (GET_CODE (base) == UNSPEC)
2774 switch (XINT (base, 1))
2776 case UNSPEC_LTREF:
2777 if (!disp)
2778 disp = gen_rtx_UNSPEC (Pmode,
2779 gen_rtvec (1, XVECEXP (base, 0, 0)),
2780 UNSPEC_LTREL_OFFSET);
2781 else
2782 return false;
2784 base = XVECEXP (base, 0, 1);
2785 break;
2787 case UNSPEC_LTREL_BASE:
2788 if (XVECLEN (base, 0) == 1)
2789 base = fake_pool_base, literal_pool = true;
2790 else
2791 base = XVECEXP (base, 0, 1);
2792 break;
2794 default:
2795 return false;
2798 if (!REG_P (base)
2799 || (GET_MODE (base) != SImode
2800 && GET_MODE (base) != Pmode))
2801 return false;
2803 if (REGNO (base) == STACK_POINTER_REGNUM
2804 || REGNO (base) == FRAME_POINTER_REGNUM
2805 || ((reload_completed || reload_in_progress)
2806 && frame_pointer_needed
2807 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2808 || REGNO (base) == ARG_POINTER_REGNUM
2809 || (flag_pic
2810 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2811 pointer = base_ptr = true;
2813 if ((reload_completed || reload_in_progress)
2814 && base == cfun->machine->base_reg)
2815 pointer = base_ptr = literal_pool = true;
2818 /* Validate index register. */
2819 if (indx)
2821 if (GET_CODE (indx) == UNSPEC)
2822 switch (XINT (indx, 1))
2824 case UNSPEC_LTREF:
2825 if (!disp)
2826 disp = gen_rtx_UNSPEC (Pmode,
2827 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2828 UNSPEC_LTREL_OFFSET);
2829 else
2830 return false;
2832 indx = XVECEXP (indx, 0, 1);
2833 break;
2835 case UNSPEC_LTREL_BASE:
2836 if (XVECLEN (indx, 0) == 1)
2837 indx = fake_pool_base, literal_pool = true;
2838 else
2839 indx = XVECEXP (indx, 0, 1);
2840 break;
2842 default:
2843 return false;
2846 if (!REG_P (indx)
2847 || (GET_MODE (indx) != SImode
2848 && GET_MODE (indx) != Pmode))
2849 return false;
2851 if (REGNO (indx) == STACK_POINTER_REGNUM
2852 || REGNO (indx) == FRAME_POINTER_REGNUM
2853 || ((reload_completed || reload_in_progress)
2854 && frame_pointer_needed
2855 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2856 || REGNO (indx) == ARG_POINTER_REGNUM
2857 || (flag_pic
2858 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2859 pointer = indx_ptr = true;
2861 if ((reload_completed || reload_in_progress)
2862 && indx == cfun->machine->base_reg)
2863 pointer = indx_ptr = literal_pool = true;
2866 /* Prefer to use pointer as base, not index. */
2867 if (base && indx && !base_ptr
2868 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2870 rtx tmp = base;
2871 base = indx;
2872 indx = tmp;
2875 /* Validate displacement. */
2876 if (!disp)
2878 /* If virtual registers are involved, the displacement will change later
2879 anyway as the virtual registers get eliminated. This could make a
2880 valid displacement invalid, but it is more likely to make an invalid
2881 displacement valid, because we sometimes access the register save area
2882 via negative offsets to one of those registers.
2883 Thus we don't check the displacement for validity here. If after
2884 elimination the displacement turns out to be invalid after all,
2885 this is fixed up by reload in any case. */
2886 /* LRA always keeps displacements up to date, and we need to know
2887 that the displacement is correct during all of LRA, not only at the
2888 final elimination. */
2889 if (lra_in_progress
2890 || (base != arg_pointer_rtx
2891 && indx != arg_pointer_rtx
2892 && base != return_address_pointer_rtx
2893 && indx != return_address_pointer_rtx
2894 && base != frame_pointer_rtx
2895 && indx != frame_pointer_rtx
2896 && base != virtual_stack_vars_rtx
2897 && indx != virtual_stack_vars_rtx))
2898 if (!DISP_IN_RANGE (offset))
2899 return false;
2901 else
2903 /* All the special cases are pointers. */
2904 pointer = true;
2906 /* In the small-PIC case, the linker converts @GOT
2907 and @GOTNTPOFF offsets to possible displacements. */
2908 if (GET_CODE (disp) == UNSPEC
2909 && (XINT (disp, 1) == UNSPEC_GOT
2910 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2911 && flag_pic == 1)
2916 /* Accept pool label offsets. */
2917 else if (GET_CODE (disp) == UNSPEC
2918 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2921 /* Accept literal pool references. */
2922 else if (GET_CODE (disp) == UNSPEC
2923 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2925 /* In case CSE pulled a non-literal-pool reference out of
2926 the pool we have to reject the address. This is
2927 especially important when loading the GOT pointer on non
2928 zarch CPUs. In this case the literal pool contains an lt
2929 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2930 will most likely exceed the displacement. */
2931 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2932 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2933 return false;
2935 orig_disp = gen_rtx_CONST (Pmode, disp);
2936 if (offset)
2938 /* If we have an offset, make sure it does not
2939 exceed the size of the constant pool entry. */
2940 rtx sym = XVECEXP (disp, 0, 0);
2941 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2942 return false;
2944 orig_disp = plus_constant (Pmode, orig_disp, offset);
2948 else
2949 return false;
2952 if (!base && !indx)
2953 pointer = true;
2955 if (out)
2957 out->base = base;
2958 out->indx = indx;
2959 out->disp = orig_disp;
2960 out->pointer = pointer;
2961 out->literal_pool = literal_pool;
2964 return true;
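/* A worked example: the canonical address (plus (plus (reg %r2) (reg %r3))
   (const_int 100)) decomposes into indx = %r2, base = %r3 and an integer
   displacement of 100, unless the pointer preference above swaps base and
   index.  */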
2967 /* Decompose a RTL expression OP for a shift count into its components,
2968 and return the base register in BASE and the offset in OFFSET.
2970 Return true if OP is a valid shift count, false if not. */
2972 bool
2973 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2975 HOST_WIDE_INT off = 0;
2977 /* We can have an integer constant, an address register,
2978 or a sum of the two. */
2979 if (GET_CODE (op) == CONST_INT)
2981 off = INTVAL (op);
2982 op = NULL_RTX;
2984 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2986 off = INTVAL (XEXP (op, 1));
2987 op = XEXP (op, 0);
2989 while (op && GET_CODE (op) == SUBREG)
2990 op = SUBREG_REG (op);
2992 if (op && GET_CODE (op) != REG)
2993 return false;
2995 if (offset)
2996 *offset = off;
2997 if (base)
2998 *base = op;
3000 return true;
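/* For instance, a shift count of (plus (reg %r1) (const_int 15)) decomposes
   into *base = %r1 and *offset = 15, while a plain (const_int 3) gives
   *base = NULL_RTX and *offset = 3.  */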
3004 /* Return true if OP is a valid address without index. */
3006 bool
3007 s390_legitimate_address_without_index_p (rtx op)
3009 struct s390_address addr;
3011 if (!s390_decompose_address (XEXP (op, 0), &addr))
3012 return false;
3013 if (addr.indx)
3014 return false;
3016 return true;
3020 /* Return TRUE if ADDR is an operand valid for a load/store relative
3021 instruction. Be aware that the alignment of the operand needs to
3022 be checked separately.
3023 Valid addresses are single references or a sum of a reference and a
3024 constant integer. Return these parts in SYMREF and ADDEND. You can
3025 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3026 values. Literal pool references are *not* considered symbol
3027 references. */
3029 static bool
3030 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3032 HOST_WIDE_INT tmpaddend = 0;
3034 if (GET_CODE (addr) == CONST)
3035 addr = XEXP (addr, 0);
3037 if (GET_CODE (addr) == PLUS)
3039 if (!CONST_INT_P (XEXP (addr, 1)))
3040 return false;
3042 tmpaddend = INTVAL (XEXP (addr, 1));
3043 addr = XEXP (addr, 0);
3046 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3047 || (GET_CODE (addr) == UNSPEC
3048 && (XINT (addr, 1) == UNSPEC_GOTENT
3049 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3051 if (symref)
3052 *symref = addr;
3053 if (addend)
3054 *addend = tmpaddend;
3056 return true;
3058 return false;
3061 /* Return true if the address in OP is valid for constraint letter C
3062 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3063 pool MEMs should be accepted. Only the Q, R, S, T constraint
3064 letters are allowed for C. */
3066 static int
3067 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3069 struct s390_address addr;
3070 bool decomposed = false;
3072 /* This check makes sure that no symbolic address (except literal
3073 pool references) are accepted by the R or T constraints. */
3074 if (s390_loadrelative_operand_p (op, NULL, NULL))
3075 return 0;
3077 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3078 if (!lit_pool_ok)
3080 if (!s390_decompose_address (op, &addr))
3081 return 0;
3082 if (addr.literal_pool)
3083 return 0;
3084 decomposed = true;
3087 switch (c)
3089 case 'Q': /* no index short displacement */
3090 if (!decomposed && !s390_decompose_address (op, &addr))
3091 return 0;
3092 if (addr.indx)
3093 return 0;
3094 if (!s390_short_displacement (addr.disp))
3095 return 0;
3096 break;
3098 case 'R': /* with index short displacement */
3099 if (TARGET_LONG_DISPLACEMENT)
3101 if (!decomposed && !s390_decompose_address (op, &addr))
3102 return 0;
3103 if (!s390_short_displacement (addr.disp))
3104 return 0;
3106 /* Any invalid address here will be fixed up by reload,
3107 so accept it for the most generic constraint. */
3108 break;
3110 case 'S': /* no index long displacement */
3111 if (!TARGET_LONG_DISPLACEMENT)
3112 return 0;
3113 if (!decomposed && !s390_decompose_address (op, &addr))
3114 return 0;
3115 if (addr.indx)
3116 return 0;
3117 if (s390_short_displacement (addr.disp))
3118 return 0;
3119 break;
3121 case 'T': /* with index long displacement */
3122 if (!TARGET_LONG_DISPLACEMENT)
3123 return 0;
3124 /* Any invalid address here will be fixed up by reload,
3125 so accept it for the most generic constraint. */
3126 if ((decomposed || s390_decompose_address (op, &addr))
3127 && s390_short_displacement (addr.disp))
3128 return 0;
3129 break;
3130 default:
3131 return 0;
3133 return 1;
3137 /* Evaluates constraint strings described by the regular expression
3138 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3139 the constraint given in STR, and 0 otherwise. */
3142 s390_mem_constraint (const char *str, rtx op)
3144 char c = str[0];
3146 switch (c)
3148 case 'A':
3149 /* Check for offsettable variants of memory constraints. */
3150 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3151 return 0;
3152 if ((reload_completed || reload_in_progress)
3153 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3154 return 0;
3155 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3156 case 'B':
3157 /* Check for non-literal-pool variants of memory constraints. */
3158 if (!MEM_P (op))
3159 return 0;
3160 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3161 case 'Q':
3162 case 'R':
3163 case 'S':
3164 case 'T':
3165 if (GET_CODE (op) != MEM)
3166 return 0;
3167 return s390_check_qrst_address (c, XEXP (op, 0), true);
3168 case 'U':
3169 return (s390_check_qrst_address ('Q', op, true)
3170 || s390_check_qrst_address ('R', op, true));
3171 case 'W':
3172 return (s390_check_qrst_address ('S', op, true)
3173 || s390_check_qrst_address ('T', op, true));
3174 case 'Y':
3175 /* Simply check for the basic form of a shift count. Reload will
3176 take care of making sure we have a proper base register. */
3177 if (!s390_decompose_shift_count (op, NULL, NULL))
3178 return 0;
3179 break;
3180 case 'Z':
3181 return s390_check_qrst_address (str[1], op, true);
3182 default:
3183 return 0;
3185 return 1;
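/* As an example, assuming the long-displacement facility, a MEM whose
   address is (plus (reg) (const_int 40)) satisfies 'Q' and 'R' (short
   displacement) but neither 'S' nor 'T', which only accept displacements
   outside the short range.  */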
3189 /* Evaluates constraint strings starting with letter O. Input
3190 parameter C is the letter following the "O" in the constraint
3191 string. Returns 1 if VALUE meets the respective constraint and 0
3192 otherwise. */
3195 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3197 if (!TARGET_EXTIMM)
3198 return 0;
3200 switch (c)
3202 case 's':
3203 return trunc_int_for_mode (value, SImode) == value;
3205 case 'p':
3206 return value == 0
3207 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3209 case 'n':
3210 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3212 default:
3213 gcc_unreachable ();
3218 /* Evaluates constraint strings starting with letter N. Parameter STR
3219 contains the letters following letter "N" in the constraint string.
3220 Returns true if VALUE matches the constraint. */
3223 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3225 machine_mode mode, part_mode;
3226 int def;
3227 int part, part_goal;
3230 if (str[0] == 'x')
3231 part_goal = -1;
3232 else
3233 part_goal = str[0] - '0';
3235 switch (str[1])
3237 case 'Q':
3238 part_mode = QImode;
3239 break;
3240 case 'H':
3241 part_mode = HImode;
3242 break;
3243 case 'S':
3244 part_mode = SImode;
3245 break;
3246 default:
3247 return 0;
3250 switch (str[2])
3252 case 'H':
3253 mode = HImode;
3254 break;
3255 case 'S':
3256 mode = SImode;
3257 break;
3258 case 'D':
3259 mode = DImode;
3260 break;
3261 default:
3262 return 0;
3265 switch (str[3])
3267 case '0':
3268 def = 0;
3269 break;
3270 case 'F':
3271 def = -1;
3272 break;
3273 default:
3274 return 0;
3277 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3278 return 0;
3280 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3281 if (part < 0)
3282 return 0;
3283 if (part_goal != -1 && part_goal != part)
3284 return 0;
3286 return 1;
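/* As an example, with STR == "xHD0" the constraint accepts any DImode
   VALUE in which exactly one HImode part is nonzero, e.g. 0xffff0000,
   while 0xffff0001 (two nonzero parts) is rejected.  */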
3290 /* Returns true if the input parameter VALUE is a float zero. */
3293 s390_float_const_zero_p (rtx value)
3295 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3296 && value == CONST0_RTX (GET_MODE (value)));
3299 /* Implement TARGET_REGISTER_MOVE_COST. */
3301 static int
3302 s390_register_move_cost (machine_mode mode,
3303 reg_class_t from, reg_class_t to)
3305 /* On s390, copy between fprs and gprs is expensive. */
3307 /* Copying becomes somewhat faster once ldgr/lgdr are available. */
3308 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3310 /* ldgr is single cycle. */
3311 if (reg_classes_intersect_p (from, GENERAL_REGS)
3312 && reg_classes_intersect_p (to, FP_REGS))
3313 return 1;
3314 /* lgdr needs 3 cycles. */
3315 if (reg_classes_intersect_p (to, GENERAL_REGS)
3316 && reg_classes_intersect_p (from, FP_REGS))
3317 return 3;
3320 /* Otherwise copying is done via memory. */
3321 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3322 && reg_classes_intersect_p (to, FP_REGS))
3323 || (reg_classes_intersect_p (from, FP_REGS)
3324 && reg_classes_intersect_p (to, GENERAL_REGS)))
3325 return 10;
3327 return 1;
3330 /* Implement TARGET_MEMORY_MOVE_COST. */
3332 static int
3333 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3334 reg_class_t rclass ATTRIBUTE_UNUSED,
3335 bool in ATTRIBUTE_UNUSED)
3337 return 2;
3340 /* Compute a (partial) cost for rtx X. Return true if the complete
3341 cost has been computed, and false if subexpressions should be
3342 scanned. In either case, *TOTAL contains the cost result.
3343 OUTER_CODE contains the code of the superexpression of x. */
3345 static bool
3346 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3347 int opno ATTRIBUTE_UNUSED,
3348 int *total, bool speed ATTRIBUTE_UNUSED)
3350 int code = GET_CODE (x);
3351 switch (code)
3353 case CONST:
3354 case CONST_INT:
3355 case LABEL_REF:
3356 case SYMBOL_REF:
3357 case CONST_DOUBLE:
3358 case CONST_WIDE_INT:
3359 case MEM:
3360 *total = 0;
3361 return true;
3363 case IOR:
3364 /* risbg */
3365 if (GET_CODE (XEXP (x, 0)) == AND
3366 && GET_CODE (XEXP (x, 1)) == ASHIFT
3367 && REG_P (XEXP (XEXP (x, 0), 0))
3368 && REG_P (XEXP (XEXP (x, 1), 0))
3369 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3370 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3371 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3372 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3374 *total = COSTS_N_INSNS (2);
3375 return true;
3377 case ASHIFT:
3378 case ASHIFTRT:
3379 case LSHIFTRT:
3380 case ROTATE:
3381 case ROTATERT:
3382 case AND:
3383 case XOR:
3384 case NEG:
3385 case NOT:
3386 *total = COSTS_N_INSNS (1);
3387 return false;
3389 case PLUS:
3390 case MINUS:
3391 *total = COSTS_N_INSNS (1);
3392 return false;
3394 case MULT:
3395 switch (mode)
3397 case SImode:
3399 rtx left = XEXP (x, 0);
3400 rtx right = XEXP (x, 1);
3401 if (GET_CODE (right) == CONST_INT
3402 && CONST_OK_FOR_K (INTVAL (right)))
3403 *total = s390_cost->mhi;
3404 else if (GET_CODE (left) == SIGN_EXTEND)
3405 *total = s390_cost->mh;
3406 else
3407 *total = s390_cost->ms; /* msr, ms, msy */
3408 break;
3410 case DImode:
3412 rtx left = XEXP (x, 0);
3413 rtx right = XEXP (x, 1);
3414 if (TARGET_ZARCH)
3416 if (GET_CODE (right) == CONST_INT
3417 && CONST_OK_FOR_K (INTVAL (right)))
3418 *total = s390_cost->mghi;
3419 else if (GET_CODE (left) == SIGN_EXTEND)
3420 *total = s390_cost->msgf;
3421 else
3422 *total = s390_cost->msg; /* msgr, msg */
3424 else /* TARGET_31BIT */
3426 if (GET_CODE (left) == SIGN_EXTEND
3427 && GET_CODE (right) == SIGN_EXTEND)
3428 /* mulsidi case: mr, m */
3429 *total = s390_cost->m;
3430 else if (GET_CODE (left) == ZERO_EXTEND
3431 && GET_CODE (right) == ZERO_EXTEND
3432 && TARGET_CPU_ZARCH)
3433 /* umulsidi case: ml, mlr */
3434 *total = s390_cost->ml;
3435 else
3436 /* Complex calculation is required. */
3437 *total = COSTS_N_INSNS (40);
3439 break;
3441 case SFmode:
3442 case DFmode:
3443 *total = s390_cost->mult_df;
3444 break;
3445 case TFmode:
3446 *total = s390_cost->mxbr;
3447 break;
3448 default:
3449 return false;
3451 return false;
3453 case FMA:
3454 switch (mode)
3456 case DFmode:
3457 *total = s390_cost->madbr;
3458 break;
3459 case SFmode:
3460 *total = s390_cost->maebr;
3461 break;
3462 default:
3463 return false;
3465 /* Negating the third argument is free: FMSUB. */
3466 if (GET_CODE (XEXP (x, 2)) == NEG)
3468 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3469 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3470 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3471 return true;
3473 return false;
3475 case UDIV:
3476 case UMOD:
3477 if (mode == TImode) /* 128 bit division */
3478 *total = s390_cost->dlgr;
3479 else if (mode == DImode)
3481 rtx right = XEXP (x, 1);
3482 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3483 *total = s390_cost->dlr;
3484 else /* 64 by 64 bit division */
3485 *total = s390_cost->dlgr;
3487 else if (mode == SImode) /* 32 bit division */
3488 *total = s390_cost->dlr;
3489 return false;
3491 case DIV:
3492 case MOD:
3493 if (mode == DImode)
3495 rtx right = XEXP (x, 1);
3496 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3497 if (TARGET_ZARCH)
3498 *total = s390_cost->dsgfr;
3499 else
3500 *total = s390_cost->dr;
3501 else /* 64 by 64 bit division */
3502 *total = s390_cost->dsgr;
3504 else if (mode == SImode) /* 32 bit division */
3505 *total = s390_cost->dlr;
3506 else if (mode == SFmode)
3508 *total = s390_cost->debr;
3510 else if (mode == DFmode)
3512 *total = s390_cost->ddbr;
3514 else if (mode == TFmode)
3516 *total = s390_cost->dxbr;
3518 return false;
3520 case SQRT:
3521 if (mode == SFmode)
3522 *total = s390_cost->sqebr;
3523 else if (mode == DFmode)
3524 *total = s390_cost->sqdbr;
3525 else /* TFmode */
3526 *total = s390_cost->sqxbr;
3527 return false;
3529 case SIGN_EXTEND:
3530 case ZERO_EXTEND:
3531 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3532 || outer_code == PLUS || outer_code == MINUS
3533 || outer_code == COMPARE)
3534 *total = 0;
3535 return false;
3537 case COMPARE:
3538 *total = COSTS_N_INSNS (1);
3539 if (GET_CODE (XEXP (x, 0)) == AND
3540 && GET_CODE (XEXP (x, 1)) == CONST_INT
3541 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3543 rtx op0 = XEXP (XEXP (x, 0), 0);
3544 rtx op1 = XEXP (XEXP (x, 0), 1);
3545 rtx op2 = XEXP (x, 1);
3547 if (memory_operand (op0, GET_MODE (op0))
3548 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3549 return true;
3550 if (register_operand (op0, GET_MODE (op0))
3551 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3552 return true;
3554 return false;
3556 default:
3557 return false;
3561 /* Return the cost of an address rtx ADDR. */
3563 static int
3564 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3565 addr_space_t as ATTRIBUTE_UNUSED,
3566 bool speed ATTRIBUTE_UNUSED)
3568 struct s390_address ad;
3569 if (!s390_decompose_address (addr, &ad))
3570 return 1000;
3572 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3575 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3576 otherwise return 0. */
3579 tls_symbolic_operand (rtx op)
3581 if (GET_CODE (op) != SYMBOL_REF)
3582 return 0;
3583 return SYMBOL_REF_TLS_MODEL (op);
3586 /* Split DImode access register reference REG (on 64-bit) into its constituent
3587 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3588 gen_highpart cannot be used as they assume all registers are word-sized,
3589 while our access registers have only half that size. */
3591 void
3592 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3594 gcc_assert (TARGET_64BIT);
3595 gcc_assert (ACCESS_REG_P (reg));
3596 gcc_assert (GET_MODE (reg) == DImode);
3597 gcc_assert (!(REGNO (reg) & 1));
3599 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3600 *hi = gen_rtx_REG (SImode, REGNO (reg));
3603 /* Return true if OP contains a symbol reference. */
3605 bool
3606 symbolic_reference_mentioned_p (rtx op)
3608 const char *fmt;
3609 int i;
3611 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3612 return 1;
3614 fmt = GET_RTX_FORMAT (GET_CODE (op));
3615 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3617 if (fmt[i] == 'E')
3619 int j;
3621 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3622 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3623 return 1;
3626 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3627 return 1;
3630 return 0;
3633 /* Return true if OP contains a reference to a thread-local symbol. */
3635 bool
3636 tls_symbolic_reference_mentioned_p (rtx op)
3638 const char *fmt;
3639 int i;
3641 if (GET_CODE (op) == SYMBOL_REF)
3642 return tls_symbolic_operand (op);
3644 fmt = GET_RTX_FORMAT (GET_CODE (op));
3645 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3647 if (fmt[i] == 'E')
3649 int j;
3651 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3652 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3653 return true;
3656 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3657 return true;
3660 return false;
3664 /* Return true if OP is a legitimate general operand when
3665 generating PIC code. It is given that flag_pic is on
3666 and that OP satisfies CONSTANT_P. */
3669 legitimate_pic_operand_p (rtx op)
3671 /* Accept all non-symbolic constants. */
3672 if (!SYMBOLIC_CONST (op))
3673 return 1;
3675 /* Reject everything else; must be handled
3676 via emit_symbolic_move. */
3677 return 0;
3680 /* Returns true if the constant value OP is a legitimate general operand.
3681 It is given that OP satisfies CONSTANT_P. */
3683 static bool
3684 s390_legitimate_constant_p (machine_mode mode, rtx op)
3686 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3688 if (GET_MODE_SIZE (mode) != 16)
3689 return 0;
3691 if (!satisfies_constraint_j00 (op)
3692 && !satisfies_constraint_jm1 (op)
3693 && !satisfies_constraint_jKK (op)
3694 && !satisfies_constraint_jxx (op)
3695 && !satisfies_constraint_jyy (op))
3696 return 0;
3699 /* Accept all non-symbolic constants. */
3700 if (!SYMBOLIC_CONST (op))
3701 return 1;
3703 /* Accept immediate LARL operands. */
3704 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3705 return 1;
3707 /* Thread-local symbols are never legal constants. This is
3708 so that emit_call knows that computing such addresses
3709 might require a function call. */
3710 if (TLS_SYMBOLIC_CONST (op))
3711 return 0;
3713 /* In the PIC case, symbolic constants must *not* be
3714 forced into the literal pool. We accept them here,
3715 so that they will be handled by emit_symbolic_move. */
3716 if (flag_pic)
3717 return 1;
3719 /* All remaining non-PIC symbolic constants are
3720 forced into the literal pool. */
3721 return 0;
3724 /* Determine if it's legal to put X into the constant pool. This
3725 is not possible if X contains the address of a symbol that is
3726 not constant (TLS) or not known at final link time (PIC). */
3728 static bool
3729 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3731 switch (GET_CODE (x))
3733 case CONST_INT:
3734 case CONST_DOUBLE:
3735 case CONST_WIDE_INT:
3736 case CONST_VECTOR:
3737 /* Accept all non-symbolic constants. */
3738 return false;
3740 case LABEL_REF:
3741 /* Labels are OK iff we are non-PIC. */
3742 return flag_pic != 0;
3744 case SYMBOL_REF:
3745 /* 'Naked' TLS symbol references are never OK,
3746 non-TLS symbols are OK iff we are non-PIC. */
3747 if (tls_symbolic_operand (x))
3748 return true;
3749 else
3750 return flag_pic != 0;
3752 case CONST:
3753 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3754 case PLUS:
3755 case MINUS:
3756 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3757 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3759 case UNSPEC:
3760 switch (XINT (x, 1))
3762 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3763 case UNSPEC_LTREL_OFFSET:
3764 case UNSPEC_GOT:
3765 case UNSPEC_GOTOFF:
3766 case UNSPEC_PLTOFF:
3767 case UNSPEC_TLSGD:
3768 case UNSPEC_TLSLDM:
3769 case UNSPEC_NTPOFF:
3770 case UNSPEC_DTPOFF:
3771 case UNSPEC_GOTNTPOFF:
3772 case UNSPEC_INDNTPOFF:
3773 return false;
3775 /* If the literal pool shares the code section, execute
3776 template placeholders may be put into the pool as well. */
3777 case UNSPEC_INSN:
3778 return TARGET_CPU_ZARCH;
3780 default:
3781 return true;
3783 break;
3785 default:
3786 gcc_unreachable ();
3790 /* Returns true if the constant value OP is a legitimate general
3791 operand during and after reload. The difference to
3792 legitimate_constant_p is that this function will not accept
3793 a constant that would need to be forced to the literal pool
3794 before it can be used as operand.
3795 This function accepts all constants which can be loaded directly
3796 into a GPR. */
3798 bool
3799 legitimate_reload_constant_p (rtx op)
3801 /* Accept la(y) operands. */
3802 if (GET_CODE (op) == CONST_INT
3803 && DISP_IN_RANGE (INTVAL (op)))
3804 return true;
3806 /* Accept l(g)hi/l(g)fi operands. */
3807 if (GET_CODE (op) == CONST_INT
3808 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3809 return true;
3811 /* Accept lliXX operands. */
3812 if (TARGET_ZARCH
3813 && GET_CODE (op) == CONST_INT
3814 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3815 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3816 return true;
3818 if (TARGET_EXTIMM
3819 && GET_CODE (op) == CONST_INT
3820 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3821 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3822 return true;
3824 /* Accept larl operands. */
3825 if (TARGET_CPU_ZARCH
3826 && larl_operand (op, VOIDmode))
3827 return true;
3829 /* Accept floating-point zero operands that fit into a single GPR. */
3830 if (GET_CODE (op) == CONST_DOUBLE
3831 && s390_float_const_zero_p (op)
3832 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3833 return true;
3835 /* Accept double-word operands that can be split. */
3836 if (GET_CODE (op) == CONST_WIDE_INT
3837 || (GET_CODE (op) == CONST_INT
3838 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3840 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3841 rtx hi = operand_subword (op, 0, 0, dword_mode);
3842 rtx lo = operand_subword (op, 1, 0, dword_mode);
3843 return legitimate_reload_constant_p (hi)
3844 && legitimate_reload_constant_p (lo);
3847 /* Everything else cannot be handled without reload. */
3848 return false;
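/* For instance, 0x7fff is accepted by the la(y)/l(g)hi checks, and,
   assuming TARGET_ZARCH, 0x12340000 is accepted by the lliXX check since
   exactly one of its HImode parts is nonzero.  */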
3851 /* Returns true if the constant value OP is a legitimate fp operand
3852 during and after reload.
3853 This function accepts all constants which can be loaded directly
3854 into an FPR. */
3856 static bool
3857 legitimate_reload_fp_constant_p (rtx op)
3859 /* Accept floating-point zero operands if the load zero instruction
3860 can be used. Prior to z196 the load fp zero instruction caused a
3861 performance penalty if the result was used as a BFP number. */
3862 if (TARGET_Z196
3863 && GET_CODE (op) == CONST_DOUBLE
3864 && s390_float_const_zero_p (op))
3865 return true;
3867 return false;
3870 /* Returns true if the constant value OP is a legitimate vector operand
3871 during and after reload.
3872 This function accepts all constants which can be loaded directly
3873 into a VR. */
3875 static bool
3876 legitimate_reload_vector_constant_p (rtx op)
3878 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3879 && (satisfies_constraint_j00 (op)
3880 || satisfies_constraint_jm1 (op)
3881 || satisfies_constraint_jKK (op)
3882 || satisfies_constraint_jxx (op)
3883 || satisfies_constraint_jyy (op)))
3884 return true;
3886 return false;
3889 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3890 return the class of reg to actually use. */
3892 static reg_class_t
3893 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3895 switch (GET_CODE (op))
3897 /* Constants we cannot reload into general registers
3898 must be forced into the literal pool. */
3899 case CONST_VECTOR:
3900 case CONST_DOUBLE:
3901 case CONST_INT:
3902 case CONST_WIDE_INT:
3903 if (reg_class_subset_p (GENERAL_REGS, rclass)
3904 && legitimate_reload_constant_p (op))
3905 return GENERAL_REGS;
3906 else if (reg_class_subset_p (ADDR_REGS, rclass)
3907 && legitimate_reload_constant_p (op))
3908 return ADDR_REGS;
3909 else if (reg_class_subset_p (FP_REGS, rclass)
3910 && legitimate_reload_fp_constant_p (op))
3911 return FP_REGS;
3912 else if (reg_class_subset_p (VEC_REGS, rclass)
3913 && legitimate_reload_vector_constant_p (op))
3914 return VEC_REGS;
3916 return NO_REGS;
3918 /* If a symbolic constant or a PLUS is reloaded,
3919 it is most likely being used as an address, so
3920 prefer ADDR_REGS. If RCLASS is not a superset
3921 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3922 case CONST:
3923 /* Symrefs cannot be pushed into the literal pool with -fPIC
3924 so we *MUST NOT* return NO_REGS for these cases
3925 (s390_cannot_force_const_mem will return true).
3927 On the other hand we MUST return NO_REGS for symrefs with
3928 invalid addend which might have been pushed to the literal
3929 pool (no -fPIC). Usually we would expect them to be
3930 handled via secondary reload but this does not happen if
3931 they are used as literal pool slot replacement in reload
3932 inheritance (see emit_input_reload_insns). */
3933 if (TARGET_CPU_ZARCH
3934 && GET_CODE (XEXP (op, 0)) == PLUS
3935 && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
3936 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
3938 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3939 return ADDR_REGS;
3940 else
3941 return NO_REGS;
3943 /* fallthrough */
3944 case LABEL_REF:
3945 case SYMBOL_REF:
3946 if (!legitimate_reload_constant_p (op))
3947 return NO_REGS;
3948 /* fallthrough */
3949 case PLUS:
3950 /* load address will be used. */
3951 if (reg_class_subset_p (ADDR_REGS, rclass))
3952 return ADDR_REGS;
3953 else
3954 return NO_REGS;
3956 default:
3957 break;
3960 return rclass;
3963 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3964 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3965 aligned. */
3967 bool
3968 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3970 HOST_WIDE_INT addend;
3971 rtx symref;
3973 /* The "required alignment" might be 0 (e.g. for certain structs
3974 accessed via BLKmode). Early abort in this case, as well as when
3975 an alignment > 8 is required. */
3976 if (alignment < 2 || alignment > 8)
3977 return false;
3979 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3980 return false;
3982 if (addend & (alignment - 1))
3983 return false;
3985 if (GET_CODE (symref) == SYMBOL_REF)
3987 /* We have load-relative instructions for 2-byte, 4-byte, and
3988 8-byte alignment so allow only these. */
3989 switch (alignment)
3991 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
3992 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
3993 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
3994 default: return false;
3998 if (GET_CODE (symref) == UNSPEC
3999 && alignment <= UNITS_PER_LONG)
4000 return true;
4002 return false;
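/* For instance, SYMBOL + 8 with ALIGNMENT == 4 is accepted provided the
   symbol is not flagged SYMBOL_FLAG_NOTALIGN4, whereas SYMBOL + 6 is
   rejected because the addend is not a multiple of 4.  */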
4005 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4006 operand, SCRATCH is used to reload the even part of the address
4007 and one is then added. */
4009 void
4010 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4012 HOST_WIDE_INT addend;
4013 rtx symref;
4015 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4016 gcc_unreachable ();
4018 if (!(addend & 1))
4019 /* Easy case. The addend is even so larl will do fine. */
4020 emit_move_insn (reg, addr);
4021 else
4023 /* We can leave the scratch register untouched if the target
4024 register is a valid base register. */
4025 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4026 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4027 scratch = reg;
4029 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4030 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4032 if (addend != 1)
4033 emit_move_insn (scratch,
4034 gen_rtx_CONST (Pmode,
4035 gen_rtx_PLUS (Pmode, symref,
4036 GEN_INT (addend - 1))));
4037 else
4038 emit_move_insn (scratch, symref);
4040 /* Increment the address using la in order to avoid clobbering cc. */
4041 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4045 /* Generate what is necessary to move between REG and MEM using
4046 SCRATCH. The direction is given by TOMEM. */
4048 void
4049 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4051 /* Reload might have pulled a constant out of the literal pool.
4052 Force it back in. */
4053 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4054 || GET_CODE (mem) == CONST_WIDE_INT
4055 || GET_CODE (mem) == CONST_VECTOR
4056 || GET_CODE (mem) == CONST)
4057 mem = force_const_mem (GET_MODE (reg), mem);
4059 gcc_assert (MEM_P (mem));
4061 /* For a load from memory we can leave the scratch register
4062 untouched if the target register is a valid base register. */
4063 if (!tomem
4064 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4065 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4066 && GET_MODE (reg) == GET_MODE (scratch))
4067 scratch = reg;
4069 /* Load address into scratch register. Since we can't have a
4070 secondary reload for a secondary reload we have to cover the case
4071 where larl would need a secondary reload here as well. */
4072 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4074 /* Now we can use a standard load/store to do the move. */
4075 if (tomem)
4076 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4077 else
4078 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4081 /* Inform reload about cases where moving X with a mode MODE to a register in
4082 RCLASS requires an extra scratch or immediate register. Return the class
4083 needed for the immediate register. */
4085 static reg_class_t
4086 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4087 machine_mode mode, secondary_reload_info *sri)
4089 enum reg_class rclass = (enum reg_class) rclass_i;
4091 /* Intermediate register needed. */
4092 if (reg_classes_intersect_p (CC_REGS, rclass))
4093 return GENERAL_REGS;
4095 if (TARGET_VX)
4097 /* The vst/vl vector move instructions only allow short
4098 displacements. */
4099 if (MEM_P (x)
4100 && GET_CODE (XEXP (x, 0)) == PLUS
4101 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4102 && !SHORT_DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1)))
4103 && reg_class_subset_p (rclass, VEC_REGS)
4104 && (!reg_class_subset_p (rclass, FP_REGS)
4105 || (GET_MODE_SIZE (mode) > 8
4106 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4108 if (in_p)
4109 sri->icode = (TARGET_64BIT ?
4110 CODE_FOR_reloaddi_la_in :
4111 CODE_FOR_reloadsi_la_in);
4112 else
4113 sri->icode = (TARGET_64BIT ?
4114 CODE_FOR_reloaddi_la_out :
4115 CODE_FOR_reloadsi_la_out);
4119 if (TARGET_Z10)
4121 HOST_WIDE_INT offset;
4122 rtx symref;
4124 /* On z10 several optimizer steps may generate larl operands with
4125 an odd addend. */
4126 if (in_p
4127 && s390_loadrelative_operand_p (x, &symref, &offset)
4128 && mode == Pmode
4129 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4130 && (offset & 1) == 1)
4131 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4132 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4134 /* Handle all the (mem (symref)) accesses we cannot use the z10
4135 instructions for. */
4136 if (MEM_P (x)
4137 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4138 && (mode == QImode
4139 || !reg_class_subset_p (rclass, GENERAL_REGS)
4140 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4141 || !s390_check_symref_alignment (XEXP (x, 0),
4142 GET_MODE_SIZE (mode))))
4144 #define __SECONDARY_RELOAD_CASE(M,m) \
4145 case M##mode: \
4146 if (TARGET_64BIT) \
4147 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4148 CODE_FOR_reload##m##di_tomem_z10; \
4149 else \
4150 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4151 CODE_FOR_reload##m##si_tomem_z10; \
4152 break;
4154 switch (GET_MODE (x))
4156 __SECONDARY_RELOAD_CASE (QI, qi);
4157 __SECONDARY_RELOAD_CASE (HI, hi);
4158 __SECONDARY_RELOAD_CASE (SI, si);
4159 __SECONDARY_RELOAD_CASE (DI, di);
4160 __SECONDARY_RELOAD_CASE (TI, ti);
4161 __SECONDARY_RELOAD_CASE (SF, sf);
4162 __SECONDARY_RELOAD_CASE (DF, df);
4163 __SECONDARY_RELOAD_CASE (TF, tf);
4164 __SECONDARY_RELOAD_CASE (SD, sd);
4165 __SECONDARY_RELOAD_CASE (DD, dd);
4166 __SECONDARY_RELOAD_CASE (TD, td);
4167 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4168 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4169 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4170 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4171 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4172 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4173 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4174 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4175 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4176 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4177 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4178 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4179 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4180 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4181 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4182 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4183 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4184 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4185 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4186 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4187 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4188 default:
4189 gcc_unreachable ();
4191 #undef __SECONDARY_RELOAD_CASE
4195 /* We need a scratch register when loading a PLUS expression which
4196 is not a legitimate operand of the LOAD ADDRESS instruction. */
4197 /* LRA can deal with the transformation of a PLUS operand very well, so
4198 we don't need to prompt LRA in this case. */
4199 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4200 sri->icode = (TARGET_64BIT ?
4201 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4203 /* When performing a multiword move from or to memory we have to make sure
4204 the second chunk in memory is addressable without causing a displacement
4205 overflow. If it is not, we calculate the address in a scratch
4206 register. */
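/* Worked example with illustrative numbers: in 31-bit mode a DImode
   value is moved as two 4-byte chunks.  With an address of the form
   base + 4092 the first chunk is fine, but the second chunk would need
   base + 4096, which no longer fits the 12-bit displacement field
   (0..4095), so the address has to be computed in a scratch register
   first.  */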
4207 if (MEM_P (x)
4208 && GET_CODE (XEXP (x, 0)) == PLUS
4209 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4210 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4211 + GET_MODE_SIZE (mode) - 1))
4213 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4214 in an s_operand address, since we may fall back to lm/stm. So we only
4215 have to care about overflows in the b+i+d case. */
4216 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4217 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4218 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4219 /* For FP_REGS no lm/stm is available so this check is triggered
4220 for displacement overflows in b+i+d and b+d like addresses. */
4221 || (reg_classes_intersect_p (FP_REGS, rclass)
4222 && s390_class_max_nregs (FP_REGS, mode) > 1))
4224 if (in_p)
4225 sri->icode = (TARGET_64BIT ?
4226 CODE_FOR_reloaddi_la_in :
4227 CODE_FOR_reloadsi_la_in);
4228 else
4229 sri->icode = (TARGET_64BIT ?
4230 CODE_FOR_reloaddi_la_out :
4231 CODE_FOR_reloadsi_la_out);
4235 /* A scratch address register is needed when a symbolic constant is
4236 copied to r0 while compiling with -fPIC. In other cases the target
4237 register might be used as a temporary (see legitimize_pic_address). */
4238 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4239 sri->icode = (TARGET_64BIT ?
4240 CODE_FOR_reloaddi_PIC_addr :
4241 CODE_FOR_reloadsi_PIC_addr);
4243 /* Either scratch or no register needed. */
4244 return NO_REGS;
4247 /* Generate code to load SRC, which is PLUS that is not a
4248 legitimate operand for the LA instruction, into TARGET.
4249 SCRATCH may be used as scratch register. */
4251 void
4252 s390_expand_plus_operand (rtx target, rtx src,
4253 rtx scratch)
4255 rtx sum1, sum2;
4256 struct s390_address ad;
4258 /* src must be a PLUS; get its two operands. */
4259 gcc_assert (GET_CODE (src) == PLUS);
4260 gcc_assert (GET_MODE (src) == Pmode);
4262 /* Check if either of the two operands is already scheduled
4263 for replacement by reload. This can happen e.g. when
4264 float registers occur in an address. */
4265 sum1 = find_replacement (&XEXP (src, 0));
4266 sum2 = find_replacement (&XEXP (src, 1));
4267 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4269 /* If the address is already strictly valid, there's nothing to do. */
4270 if (!s390_decompose_address (src, &ad)
4271 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4272 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4274 /* Otherwise, one of the operands cannot be an address register;
4275 we reload its value into the scratch register. */
4276 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4278 emit_move_insn (scratch, sum1);
4279 sum1 = scratch;
4281 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4283 emit_move_insn (scratch, sum2);
4284 sum2 = scratch;
4287 /* According to the way these invalid addresses are generated
4288 in reload.c, it should never happen (at least on s390) that
4289 *neither* of the PLUS components, after find_replacement
4290 was applied, is an address register. */
4291 if (sum1 == scratch && sum2 == scratch)
4293 debug_rtx (src);
4294 gcc_unreachable ();
4297 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4300 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4301 is only ever performed on addresses, so we can mark the
4302 sum as legitimate for LA in any case. */
4303 s390_load_address (target, src);
4307 /* Return true if ADDR is a valid memory address.
4308 STRICT specifies whether strict register checking applies. */
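/* For reference, a decomposable address has the canonical form
     base_reg + index_reg + displacement
   where the displacement is a 12-bit unsigned value, or a 20-bit
   signed value on long-displacement targets.  For instance
   (plus (reg) (const_int 4000)) decomposes fine, while the same
   address with 4096 relies on the long-displacement facility.  */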
4310 static bool
4311 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4313 struct s390_address ad;
4315 if (TARGET_Z10
4316 && larl_operand (addr, VOIDmode)
4317 && (mode == VOIDmode
4318 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4319 return true;
4321 if (!s390_decompose_address (addr, &ad))
4322 return false;
4324 if (strict)
4326 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4327 return false;
4329 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4330 return false;
4332 else
4334 if (ad.base
4335 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4336 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4337 return false;
4339 if (ad.indx
4340 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4341 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4342 return false;
4344 return true;
4347 /* Return true if OP is a valid operand for the LA instruction.
4348 In 31-bit, we need to prove that the result is used as an
4349 address, as LA performs only a 31-bit addition. */
4351 bool
4352 legitimate_la_operand_p (rtx op)
4354 struct s390_address addr;
4355 if (!s390_decompose_address (op, &addr))
4356 return false;
4358 return (TARGET_64BIT || addr.pointer);
4361 /* Return true if it is valid *and* preferable to use LA to
4362 compute the sum of OP1 and OP2. */
4364 bool
4365 preferred_la_operand_p (rtx op1, rtx op2)
4367 struct s390_address addr;
4369 if (op2 != const0_rtx)
4370 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4372 if (!s390_decompose_address (op1, &addr))
4373 return false;
4374 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4375 return false;
4376 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4377 return false;
4379 /* Avoid LA instructions with an index register on z196; it is
4380 preferable to use regular add instructions when possible.
4381 Starting with zEC12 the LA with an index register is "uncracked"
4382 again. */
4383 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4384 return false;
4386 if (!TARGET_64BIT && !addr.pointer)
4387 return false;
4389 if (addr.pointer)
4390 return true;
4392 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4393 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4394 return true;
4396 return false;
4399 /* Emit a forced load-address operation to load SRC into DST.
4400 This will use the LOAD ADDRESS instruction even in situations
4401 where legitimate_la_operand_p (SRC) returns false. */
4403 void
4404 s390_load_address (rtx dst, rtx src)
4406 if (TARGET_64BIT)
4407 emit_move_insn (dst, src);
4408 else
4409 emit_insn (gen_force_la_31 (dst, src));
4412 /* Return a legitimate reference for ORIG (an address) using the
4413 register REG. If REG is 0, a new pseudo is generated.
4415 There are two types of references that must be handled:
4417 1. Global data references must load the address from the GOT, via
4418 the PIC reg. An insn is emitted to do this load, and the reg is
4419 returned.
4421 2. Static data references, constant pool addresses, and code labels
4422 compute the address as an offset from the GOT, whose base is in
4423 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4424 differentiate them from global data objects. The returned
4425 address is the PIC reg + an unspec constant.
4427 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4428 reg also appears in the address. */
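/* Illustrative sequences produced by this function (register names
   are placeholders; the operand modifiers are the ones used in the
   comments below):
     local symbol, zarch:       larl  <reg>, sym
     global symbol, z10:        lgrl  <reg>, sym@GOTENT
     global symbol, zarch:      larl  <tmp>, sym@GOTENT
                                lg    <reg>, 0(<tmp>)
     global symbol, flag_pic=1: lg    <reg>, sym@GOT(r12)  */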
4431 legitimize_pic_address (rtx orig, rtx reg)
4433 rtx addr = orig;
4434 rtx addend = const0_rtx;
4435 rtx new_rtx = orig;
4437 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4439 if (GET_CODE (addr) == CONST)
4440 addr = XEXP (addr, 0);
4442 if (GET_CODE (addr) == PLUS)
4444 addend = XEXP (addr, 1);
4445 addr = XEXP (addr, 0);
4448 if ((GET_CODE (addr) == LABEL_REF
4449 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4450 || (GET_CODE (addr) == UNSPEC &&
4451 (XINT (addr, 1) == UNSPEC_GOTENT
4452 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4453 && GET_CODE (addend) == CONST_INT)
4455 /* This can be locally addressed. */
4457 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4458 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4459 gen_rtx_CONST (Pmode, addr) : addr);
4461 if (TARGET_CPU_ZARCH
4462 && larl_operand (const_addr, VOIDmode)
4463 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4464 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4466 if (INTVAL (addend) & 1)
4468 /* LARL can't handle odd offsets, so emit a pair of LARL
4469 and LA. */
4470 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4472 if (!DISP_IN_RANGE (INTVAL (addend)))
4474 HOST_WIDE_INT even = INTVAL (addend) - 1;
4475 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4476 addr = gen_rtx_CONST (Pmode, addr);
4477 addend = const1_rtx;
4480 emit_move_insn (temp, addr);
4481 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4483 if (reg != 0)
4485 s390_load_address (reg, new_rtx);
4486 new_rtx = reg;
4489 else
4491 /* If the offset is even, we can just use LARL. This
4492 will happen automatically. */
4495 else
4497 /* No larl - Access local symbols relative to the GOT. */
4499 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4501 if (reload_in_progress || reload_completed)
4502 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4504 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4505 if (addend != const0_rtx)
4506 addr = gen_rtx_PLUS (Pmode, addr, addend);
4507 addr = gen_rtx_CONST (Pmode, addr);
4508 addr = force_const_mem (Pmode, addr);
4509 emit_move_insn (temp, addr);
4511 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4512 if (reg != 0)
4514 s390_load_address (reg, new_rtx);
4515 new_rtx = reg;
4519 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4521 /* A non-local symbol reference without addend.
4523 The symbol ref is wrapped into an UNSPEC to make sure the
4524 proper operand modifier (@GOT or @GOTENT) will be emitted.
4525 This will tell the linker to put the symbol into the GOT.
4527 Additionally the code dereferencing the GOT slot is emitted here.
4529 An addend to the symref needs to be added afterwards.
4530 legitimize_pic_address calls itself recursively to handle
4531 that case. So no need to do it here. */
4533 if (reg == 0)
4534 reg = gen_reg_rtx (Pmode);
4536 if (TARGET_Z10)
4538 /* Use load relative if possible.
4539 lgrl <target>, sym@GOTENT */
4540 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4541 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4542 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4544 emit_move_insn (reg, new_rtx);
4545 new_rtx = reg;
4547 else if (flag_pic == 1)
4549 /* Assume GOT offset is a valid displacement operand (< 4k
4550 or < 512k with z990). This is handled the same way in
4551 both 31- and 64-bit code (@GOT).
4552 lg <target>, sym@GOT(r12) */
4554 if (reload_in_progress || reload_completed)
4555 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4557 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4558 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4559 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4560 new_rtx = gen_const_mem (Pmode, new_rtx);
4561 emit_move_insn (reg, new_rtx);
4562 new_rtx = reg;
4564 else if (TARGET_CPU_ZARCH)
4566 /* If the GOT offset might be >= 4k, we determine the position
4567 of the GOT entry via a PC-relative LARL (@GOTENT).
4568 larl temp, sym@GOTENT
4569 lg <target>, 0(temp) */
4571 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4573 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4574 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4576 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4577 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4578 emit_move_insn (temp, new_rtx);
4580 new_rtx = gen_const_mem (Pmode, temp);
4581 emit_move_insn (reg, new_rtx);
4583 new_rtx = reg;
4585 else
4587 /* If the GOT offset might be >= 4k, we have to load it
4588 from the literal pool (@GOT).
4590 lg temp, lit-litbase(r13)
4591 lg <target>, 0(temp)
4592 lit: .long sym@GOT */
4594 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4596 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4597 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4599 if (reload_in_progress || reload_completed)
4600 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4602 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4603 addr = gen_rtx_CONST (Pmode, addr);
4604 addr = force_const_mem (Pmode, addr);
4605 emit_move_insn (temp, addr);
4607 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4608 new_rtx = gen_const_mem (Pmode, new_rtx);
4609 emit_move_insn (reg, new_rtx);
4610 new_rtx = reg;
4613 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4615 gcc_assert (XVECLEN (addr, 0) == 1);
4616 switch (XINT (addr, 1))
4618 /* These address symbols (or PLT slots) relative to the GOT
4619 (not GOT slots!). In general this will exceed the
4620 displacement range, so these values belong in the literal
4621 pool. */
4622 case UNSPEC_GOTOFF:
4623 case UNSPEC_PLTOFF:
4624 new_rtx = force_const_mem (Pmode, orig);
4625 break;
4627 /* For -fPIC the GOT size might exceed the displacement
4628 range so make sure the value is in the literal pool. */
4629 case UNSPEC_GOT:
4630 if (flag_pic == 2)
4631 new_rtx = force_const_mem (Pmode, orig);
4632 break;
4634 /* For @GOTENT larl is used. This is handled like local
4635 symbol refs. */
4636 case UNSPEC_GOTENT:
4637 gcc_unreachable ();
4638 break;
4640 /* @PLT is OK as is on 64-bit, but must be converted to
4641 GOT-relative @PLTOFF on 31-bit. */
4642 case UNSPEC_PLT:
4643 if (!TARGET_CPU_ZARCH)
4645 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4647 if (reload_in_progress || reload_completed)
4648 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4650 addr = XVECEXP (addr, 0, 0);
4651 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4652 UNSPEC_PLTOFF);
4653 if (addend != const0_rtx)
4654 addr = gen_rtx_PLUS (Pmode, addr, addend);
4655 addr = gen_rtx_CONST (Pmode, addr);
4656 addr = force_const_mem (Pmode, addr);
4657 emit_move_insn (temp, addr);
4659 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4660 if (reg != 0)
4662 s390_load_address (reg, new_rtx);
4663 new_rtx = reg;
4666 else
4667 /* On 64 bit larl can be used. This case is handled like
4668 local symbol refs. */
4669 gcc_unreachable ();
4670 break;
4672 /* Everything else cannot happen. */
4673 default:
4674 gcc_unreachable ();
4677 else if (addend != const0_rtx)
4679 /* Otherwise, compute the sum. */
4681 rtx base = legitimize_pic_address (addr, reg);
4682 new_rtx = legitimize_pic_address (addend,
4683 base == reg ? NULL_RTX : reg);
4684 if (GET_CODE (new_rtx) == CONST_INT)
4685 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4686 else
4688 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4690 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4691 new_rtx = XEXP (new_rtx, 1);
4693 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4696 if (GET_CODE (new_rtx) == CONST)
4697 new_rtx = XEXP (new_rtx, 0);
4698 new_rtx = force_operand (new_rtx, 0);
4701 return new_rtx;
4704 /* Load the thread pointer into a register. */
4707 s390_get_thread_pointer (void)
4709 rtx tp = gen_reg_rtx (Pmode);
4711 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4712 mark_reg_pointer (tp, BITS_PER_WORD);
4714 return tp;
4717 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
4718 in s390_tls_symbol which always refers to __tls_get_offset.
4719 The returned offset is written to RESULT_REG and a USE rtx is
4720 generated for TLS_CALL. */
4722 static GTY(()) rtx s390_tls_symbol;
4724 static void
4725 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4727 rtx insn;
4729 if (!flag_pic)
4730 emit_insn (s390_load_got ());
4732 if (!s390_tls_symbol)
4733 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4735 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4736 gen_rtx_REG (Pmode, RETURN_REGNUM));
4738 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4739 RTL_CONST_CALL_P (insn) = 1;
4742 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4743 this (thread-local) address. REG may be used as temporary. */
4745 static rtx
4746 legitimize_tls_address (rtx addr, rtx reg)
4748 rtx new_rtx, tls_call, temp, base, r2, insn;
4750 if (GET_CODE (addr) == SYMBOL_REF)
4751 switch (tls_symbolic_operand (addr))
4753 case TLS_MODEL_GLOBAL_DYNAMIC:
4754 start_sequence ();
4755 r2 = gen_rtx_REG (Pmode, 2);
4756 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4757 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4758 new_rtx = force_const_mem (Pmode, new_rtx);
4759 emit_move_insn (r2, new_rtx);
4760 s390_emit_tls_call_insn (r2, tls_call);
4761 insn = get_insns ();
4762 end_sequence ();
4764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4765 temp = gen_reg_rtx (Pmode);
4766 emit_libcall_block (insn, temp, r2, new_rtx);
4768 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4769 if (reg != 0)
4771 s390_load_address (reg, new_rtx);
4772 new_rtx = reg;
4774 break;
4776 case TLS_MODEL_LOCAL_DYNAMIC:
4777 start_sequence ();
4778 r2 = gen_rtx_REG (Pmode, 2);
4779 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4780 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4781 new_rtx = force_const_mem (Pmode, new_rtx);
4782 emit_move_insn (r2, new_rtx);
4783 s390_emit_tls_call_insn (r2, tls_call);
4784 insn = get_insns ();
4785 end_sequence ();
4787 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4788 temp = gen_reg_rtx (Pmode);
4789 emit_libcall_block (insn, temp, r2, new_rtx);
4791 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4792 base = gen_reg_rtx (Pmode);
4793 s390_load_address (base, new_rtx);
4795 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4796 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4797 new_rtx = force_const_mem (Pmode, new_rtx);
4798 temp = gen_reg_rtx (Pmode);
4799 emit_move_insn (temp, new_rtx);
4801 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4802 if (reg != 0)
4804 s390_load_address (reg, new_rtx);
4805 new_rtx = reg;
4807 break;
4809 case TLS_MODEL_INITIAL_EXEC:
4810 if (flag_pic == 1)
4812 /* Assume GOT offset < 4k. This is handled the same way
4813 in both 31- and 64-bit code. */
4815 if (reload_in_progress || reload_completed)
4816 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4818 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4819 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4820 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4821 new_rtx = gen_const_mem (Pmode, new_rtx);
4822 temp = gen_reg_rtx (Pmode);
4823 emit_move_insn (temp, new_rtx);
4825 else if (TARGET_CPU_ZARCH)
4827 /* If the GOT offset might be >= 4k, we determine the position
4828 of the GOT entry via a PC-relative LARL. */
4830 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4831 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4832 temp = gen_reg_rtx (Pmode);
4833 emit_move_insn (temp, new_rtx);
4835 new_rtx = gen_const_mem (Pmode, temp);
4836 temp = gen_reg_rtx (Pmode);
4837 emit_move_insn (temp, new_rtx);
4839 else if (flag_pic)
4841 /* If the GOT offset might be >= 4k, we have to load it
4842 from the literal pool. */
4844 if (reload_in_progress || reload_completed)
4845 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4847 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4848 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4849 new_rtx = force_const_mem (Pmode, new_rtx);
4850 temp = gen_reg_rtx (Pmode);
4851 emit_move_insn (temp, new_rtx);
4853 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4854 new_rtx = gen_const_mem (Pmode, new_rtx);
4856 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4857 temp = gen_reg_rtx (Pmode);
4858 emit_insn (gen_rtx_SET (temp, new_rtx));
4860 else
4862 /* In position-dependent code, load the absolute address of
4863 the GOT entry from the literal pool. */
4865 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4866 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4867 new_rtx = force_const_mem (Pmode, new_rtx);
4868 temp = gen_reg_rtx (Pmode);
4869 emit_move_insn (temp, new_rtx);
4871 new_rtx = temp;
4872 new_rtx = gen_const_mem (Pmode, new_rtx);
4873 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4874 temp = gen_reg_rtx (Pmode);
4875 emit_insn (gen_rtx_SET (temp, new_rtx));
4878 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4879 if (reg != 0)
4881 s390_load_address (reg, new_rtx);
4882 new_rtx = reg;
4884 break;
4886 case TLS_MODEL_LOCAL_EXEC:
4887 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4888 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4889 new_rtx = force_const_mem (Pmode, new_rtx);
4890 temp = gen_reg_rtx (Pmode);
4891 emit_move_insn (temp, new_rtx);
4893 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4894 if (reg != 0)
4896 s390_load_address (reg, new_rtx);
4897 new_rtx = reg;
4899 break;
4901 default:
4902 gcc_unreachable ();
4905 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4907 switch (XINT (XEXP (addr, 0), 1))
4909 case UNSPEC_INDNTPOFF:
4910 gcc_assert (TARGET_CPU_ZARCH);
4911 new_rtx = addr;
4912 break;
4914 default:
4915 gcc_unreachable ();
4919 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4920 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4922 new_rtx = XEXP (XEXP (addr, 0), 0);
4923 if (GET_CODE (new_rtx) != SYMBOL_REF)
4924 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4926 new_rtx = legitimize_tls_address (new_rtx, reg);
4927 new_rtx = plus_constant (Pmode, new_rtx,
4928 INTVAL (XEXP (XEXP (addr, 0), 1)));
4929 new_rtx = force_operand (new_rtx, 0);
4932 else
4933 gcc_unreachable (); /* for now ... */
4935 return new_rtx;
4938 /* Emit insns making the address in operands[1] valid for a standard
4939 move to operands[0]. operands[1] is replaced by an address which
4940 should be used instead of the former RTX to emit the move
4941 pattern. */
4943 void
4944 emit_symbolic_move (rtx *operands)
4946 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4948 if (GET_CODE (operands[0]) == MEM)
4949 operands[1] = force_reg (Pmode, operands[1]);
4950 else if (TLS_SYMBOLIC_CONST (operands[1]))
4951 operands[1] = legitimize_tls_address (operands[1], temp);
4952 else if (flag_pic)
4953 operands[1] = legitimize_pic_address (operands[1], temp);
4956 /* Try machine-dependent ways of modifying an illegitimate address X
4957 to be legitimate. If we find one, return the new, valid address.
4959 OLDX is the address as it was before break_out_memory_refs was called.
4960 In some cases it is useful to look at this to decide what needs to be done.
4962 MODE is the mode of the operand pointed to by X.
4964 When -fpic is used, special handling is needed for symbolic references.
4965 See comments by legitimize_pic_address for details. */
4967 static rtx
4968 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4969 machine_mode mode ATTRIBUTE_UNUSED)
4971 rtx constant_term = const0_rtx;
4973 if (TLS_SYMBOLIC_CONST (x))
4975 x = legitimize_tls_address (x, 0);
4977 if (s390_legitimate_address_p (mode, x, FALSE))
4978 return x;
4980 else if (GET_CODE (x) == PLUS
4981 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
4982 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
4984 return x;
4986 else if (flag_pic)
4988 if (SYMBOLIC_CONST (x)
4989 || (GET_CODE (x) == PLUS
4990 && (SYMBOLIC_CONST (XEXP (x, 0))
4991 || SYMBOLIC_CONST (XEXP (x, 1)))))
4992 x = legitimize_pic_address (x, 0);
4994 if (s390_legitimate_address_p (mode, x, FALSE))
4995 return x;
4998 x = eliminate_constant_term (x, &constant_term);
5000 /* Optimize loading of large displacements by splitting them
5001 into the multiple of 4K and the rest; this allows the
5002 former to be CSE'd if possible.
5004 Don't do this if the displacement is added to a register
5005 pointing into the stack frame, as the offsets will
5006 change later anyway. */
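/* Example with illustrative numbers: a constant term of 0x12345 is
   split into lower = 0x345, which fits the 12-bit displacement field,
   and upper = 0x12000, which is loaded into a register once and can
   then be CSE'd across neighboring accesses.  */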
5008 if (GET_CODE (constant_term) == CONST_INT
5009 && !TARGET_LONG_DISPLACEMENT
5010 && !DISP_IN_RANGE (INTVAL (constant_term))
5011 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5013 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5014 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5016 rtx temp = gen_reg_rtx (Pmode);
5017 rtx val = force_operand (GEN_INT (upper), temp);
5018 if (val != temp)
5019 emit_move_insn (temp, val);
5021 x = gen_rtx_PLUS (Pmode, x, temp);
5022 constant_term = GEN_INT (lower);
5025 if (GET_CODE (x) == PLUS)
5027 if (GET_CODE (XEXP (x, 0)) == REG)
5029 rtx temp = gen_reg_rtx (Pmode);
5030 rtx val = force_operand (XEXP (x, 1), temp);
5031 if (val != temp)
5032 emit_move_insn (temp, val);
5034 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5037 else if (GET_CODE (XEXP (x, 1)) == REG)
5039 rtx temp = gen_reg_rtx (Pmode);
5040 rtx val = force_operand (XEXP (x, 0), temp);
5041 if (val != temp)
5042 emit_move_insn (temp, val);
5044 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5048 if (constant_term != const0_rtx)
5049 x = gen_rtx_PLUS (Pmode, x, constant_term);
5051 return x;
5054 /* Try a machine-dependent way of reloading an illegitimate address AD
5055 operand. If we find one, push the reload and return the new address.
5057 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5058 and TYPE is the reload type of the current reload. */
5061 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5062 int opnum, int type)
5064 if (!optimize || TARGET_LONG_DISPLACEMENT)
5065 return NULL_RTX;
5067 if (GET_CODE (ad) == PLUS)
5069 rtx tem = simplify_binary_operation (PLUS, Pmode,
5070 XEXP (ad, 0), XEXP (ad, 1));
5071 if (tem)
5072 ad = tem;
5075 if (GET_CODE (ad) == PLUS
5076 && GET_CODE (XEXP (ad, 0)) == REG
5077 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5078 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5080 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5081 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5082 rtx cst, tem, new_rtx;
5084 cst = GEN_INT (upper);
5085 if (!legitimate_reload_constant_p (cst))
5086 cst = force_const_mem (Pmode, cst);
5088 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5089 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5091 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5092 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5093 opnum, (enum reload_type) type);
5094 return new_rtx;
5097 return NULL_RTX;
5100 /* Emit code to move LEN bytes from SRC to DST. */
5102 bool
5103 s390_expand_movmem (rtx dst, rtx src, rtx len)
5105 /* When tuning for z10 or higher we rely on the Glibc functions to
5106 do the right thing. Only for constant lengths below 64k do we
5107 generate inline code. */
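/* Sketch of the strategy implemented below: constant lengths of up to
   256 bytes are handled with a single movmem_short (MVC); with MVCLE
   available the whole move is done by one movmem_long; otherwise a
   loop moves 256-byte blocks and a trailing movmem_short copies the
   remainder.  */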
5108 if (s390_tune >= PROCESSOR_2097_Z10
5109 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5110 return false;
5112 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5114 if (INTVAL (len) > 0)
5115 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5118 else if (TARGET_MVCLE)
5120 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5123 else
5125 rtx dst_addr, src_addr, count, blocks, temp;
5126 rtx_code_label *loop_start_label = gen_label_rtx ();
5127 rtx_code_label *loop_end_label = gen_label_rtx ();
5128 rtx_code_label *end_label = gen_label_rtx ();
5129 machine_mode mode;
5131 mode = GET_MODE (len);
5132 if (mode == VOIDmode)
5133 mode = Pmode;
5135 dst_addr = gen_reg_rtx (Pmode);
5136 src_addr = gen_reg_rtx (Pmode);
5137 count = gen_reg_rtx (mode);
5138 blocks = gen_reg_rtx (mode);
5140 convert_move (count, len, 1);
5141 emit_cmp_and_jump_insns (count, const0_rtx,
5142 EQ, NULL_RTX, mode, 1, end_label);
5144 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5145 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5146 dst = change_address (dst, VOIDmode, dst_addr);
5147 src = change_address (src, VOIDmode, src_addr);
5149 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5150 OPTAB_DIRECT);
5151 if (temp != count)
5152 emit_move_insn (count, temp);
5154 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5155 OPTAB_DIRECT);
5156 if (temp != blocks)
5157 emit_move_insn (blocks, temp);
5159 emit_cmp_and_jump_insns (blocks, const0_rtx,
5160 EQ, NULL_RTX, mode, 1, loop_end_label);
5162 emit_label (loop_start_label);
5164 if (TARGET_Z10
5165 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5167 rtx prefetch;
5169 /* Issue a read prefetch for the +3 cache line. */
5170 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5171 const0_rtx, const0_rtx);
5172 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5173 emit_insn (prefetch);
5175 /* Issue a write prefetch for the +3 cache line. */
5176 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5177 const1_rtx, const0_rtx);
5178 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5179 emit_insn (prefetch);
5182 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5183 s390_load_address (dst_addr,
5184 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5185 s390_load_address (src_addr,
5186 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5188 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5189 OPTAB_DIRECT);
5190 if (temp != blocks)
5191 emit_move_insn (blocks, temp);
5193 emit_cmp_and_jump_insns (blocks, const0_rtx,
5194 EQ, NULL_RTX, mode, 1, loop_end_label);
5196 emit_jump (loop_start_label);
5197 emit_label (loop_end_label);
5199 emit_insn (gen_movmem_short (dst, src,
5200 convert_to_mode (Pmode, count, 1)));
5201 emit_label (end_label);
5203 return true;
5206 /* Emit code to set LEN bytes at DST to VAL.
5207 Make use of clrmem if VAL is zero. */
5209 void
5210 s390_expand_setmem (rtx dst, rtx len, rtx val)
5212 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5213 return;
5215 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5217 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5219 if (val == const0_rtx && INTVAL (len) <= 256)
5220 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5221 else
5223 /* Initialize memory by storing the first byte. */
5224 emit_move_insn (adjust_address (dst, QImode, 0), val);
5226 if (INTVAL (len) > 1)
5228 /* Initiate a 1-byte overlap move.
5229 The first byte of DST is propagated through DSTP1.
5230 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5231 DST is set to size 1 so the rest of the memory location
5232 does not count as a source operand. */
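/* Worked example (illustrative values): for LEN = 5 and VAL = 0x41 the
   first-byte store above puts 0x41 at DST[0]; the overlapping move
   emitted below then copies 4 bytes from DST to DST+1.  Since the move
   proceeds byte by byte from left to right, the first byte propagates
   and all five bytes end up containing 0x41.  */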
5233 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5234 set_mem_size (dst, 1);
5236 emit_insn (gen_movmem_short (dstp1, dst,
5237 GEN_INT (INTVAL (len) - 2)));
5242 else if (TARGET_MVCLE)
5244 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5245 if (TARGET_64BIT)
5246 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5247 val));
5248 else
5249 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5250 val));
5253 else
5255 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5256 rtx_code_label *loop_start_label = gen_label_rtx ();
5257 rtx_code_label *loop_end_label = gen_label_rtx ();
5258 rtx_code_label *end_label = gen_label_rtx ();
5259 machine_mode mode;
5261 mode = GET_MODE (len);
5262 if (mode == VOIDmode)
5263 mode = Pmode;
5265 dst_addr = gen_reg_rtx (Pmode);
5266 count = gen_reg_rtx (mode);
5267 blocks = gen_reg_rtx (mode);
5269 convert_move (count, len, 1);
5270 emit_cmp_and_jump_insns (count, const0_rtx,
5271 EQ, NULL_RTX, mode, 1, end_label);
5273 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5274 dst = change_address (dst, VOIDmode, dst_addr);
5276 if (val == const0_rtx)
5277 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5278 OPTAB_DIRECT);
5279 else
5281 dstp1 = adjust_address (dst, VOIDmode, 1);
5282 set_mem_size (dst, 1);
5284 /* Initialize memory by storing the first byte. */
5285 emit_move_insn (adjust_address (dst, QImode, 0), val);
5287 /* If count is 1 we are done. */
5288 emit_cmp_and_jump_insns (count, const1_rtx,
5289 EQ, NULL_RTX, mode, 1, end_label);
5291 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5292 OPTAB_DIRECT);
5294 if (temp != count)
5295 emit_move_insn (count, temp);
5297 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5298 OPTAB_DIRECT);
5299 if (temp != blocks)
5300 emit_move_insn (blocks, temp);
5302 emit_cmp_and_jump_insns (blocks, const0_rtx,
5303 EQ, NULL_RTX, mode, 1, loop_end_label);
5305 emit_label (loop_start_label);
5307 if (TARGET_Z10
5308 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5310 /* Issue a write prefetch for the +4 cache line. */
5311 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5312 GEN_INT (1024)),
5313 const1_rtx, const0_rtx);
5314 emit_insn (prefetch);
5315 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5318 if (val == const0_rtx)
5319 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5320 else
5321 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5322 s390_load_address (dst_addr,
5323 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5325 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5326 OPTAB_DIRECT);
5327 if (temp != blocks)
5328 emit_move_insn (blocks, temp);
5330 emit_cmp_and_jump_insns (blocks, const0_rtx,
5331 EQ, NULL_RTX, mode, 1, loop_end_label);
5333 emit_jump (loop_start_label);
5334 emit_label (loop_end_label);
5336 if (val == const0_rtx)
5337 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5338 else
5339 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5340 emit_label (end_label);
5344 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5345 and return the result in TARGET. */
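/* Sketch of the expansion: the byte comparison itself is done with the
   cmpmem_short (CLC) or cmpmem_long (CLCLE) patterns, or with a loop
   of 256-byte CLC blocks for large or variable lengths; the resulting
   condition code is then turned into the negative/zero/positive
   integer expected from memcmp via the cmpint pattern.  */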
5347 bool
5348 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5350 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5351 rtx tmp;
5353 /* When tuning for z10 or higher we rely on the Glibc functions to
5354 do the right thing. Only for constant lengths below 64k do we
5355 generate inline code. */
5356 if (s390_tune >= PROCESSOR_2097_Z10
5357 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5358 return false;
5360 /* As the result of CMPINT is inverted compared to what we need,
5361 we have to swap the operands. */
5362 tmp = op0; op0 = op1; op1 = tmp;
5364 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5366 if (INTVAL (len) > 0)
5368 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5369 emit_insn (gen_cmpint (target, ccreg));
5371 else
5372 emit_move_insn (target, const0_rtx);
5374 else if (TARGET_MVCLE)
5376 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5377 emit_insn (gen_cmpint (target, ccreg));
5379 else
5381 rtx addr0, addr1, count, blocks, temp;
5382 rtx_code_label *loop_start_label = gen_label_rtx ();
5383 rtx_code_label *loop_end_label = gen_label_rtx ();
5384 rtx_code_label *end_label = gen_label_rtx ();
5385 machine_mode mode;
5387 mode = GET_MODE (len);
5388 if (mode == VOIDmode)
5389 mode = Pmode;
5391 addr0 = gen_reg_rtx (Pmode);
5392 addr1 = gen_reg_rtx (Pmode);
5393 count = gen_reg_rtx (mode);
5394 blocks = gen_reg_rtx (mode);
5396 convert_move (count, len, 1);
5397 emit_cmp_and_jump_insns (count, const0_rtx,
5398 EQ, NULL_RTX, mode, 1, end_label);
5400 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5401 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5402 op0 = change_address (op0, VOIDmode, addr0);
5403 op1 = change_address (op1, VOIDmode, addr1);
5405 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5406 OPTAB_DIRECT);
5407 if (temp != count)
5408 emit_move_insn (count, temp);
5410 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5411 OPTAB_DIRECT);
5412 if (temp != blocks)
5413 emit_move_insn (blocks, temp);
5415 emit_cmp_and_jump_insns (blocks, const0_rtx,
5416 EQ, NULL_RTX, mode, 1, loop_end_label);
5418 emit_label (loop_start_label);
5420 if (TARGET_Z10
5421 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5423 rtx prefetch;
5425 /* Issue a read prefetch for the +2 cache line of operand 1. */
5426 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5427 const0_rtx, const0_rtx);
5428 emit_insn (prefetch);
5429 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5431 /* Issue a read prefetch for the +2 cache line of operand 2. */
5432 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5433 const0_rtx, const0_rtx);
5434 emit_insn (prefetch);
5435 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5438 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5439 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5440 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5441 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5442 temp = gen_rtx_SET (pc_rtx, temp);
5443 emit_jump_insn (temp);
5445 s390_load_address (addr0,
5446 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5447 s390_load_address (addr1,
5448 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5450 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5451 OPTAB_DIRECT);
5452 if (temp != blocks)
5453 emit_move_insn (blocks, temp);
5455 emit_cmp_and_jump_insns (blocks, const0_rtx,
5456 EQ, NULL_RTX, mode, 1, loop_end_label);
5458 emit_jump (loop_start_label);
5459 emit_label (loop_end_label);
5461 emit_insn (gen_cmpmem_short (op0, op1,
5462 convert_to_mode (Pmode, count, 1)));
5463 emit_label (end_label);
5465 emit_insn (gen_cmpint (target, ccreg));
5467 return true;
5470 /* Emit a conditional jump to LABEL for condition code mask MASK using
5471 comparison operator COMPARISON. Return the emitted jump insn. */
5473 static rtx
5474 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5476 rtx temp;
5478 gcc_assert (comparison == EQ || comparison == NE);
5479 gcc_assert (mask > 0 && mask < 15);
5481 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5482 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5483 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5484 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5485 temp = gen_rtx_SET (pc_rtx, temp);
5486 return emit_jump_insn (temp);
5489 /* Emit the instructions to implement strlen of STRING and store the
5490 result in TARGET. The string has the known ALIGNMENT. This
5491 version uses vector instructions and is therefore not appropriate
5492 for targets prior to z13. */
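/* Sketch of the code generated below: a first, possibly partial load
   with vll covers the bytes up to the next 16-byte boundary; the main
   loop then loads aligned 16-byte chunks and searches them with the
   zero-search variant of vfene; the byte index delivered by vfene is
   finally added to the running string index to form the result.  */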
5494 void
5495 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5497 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5498 int very_likely = REG_BR_PROB_BASE - 1;
5499 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5500 rtx str_reg = gen_reg_rtx (V16QImode);
5501 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5502 rtx str_idx_reg = gen_reg_rtx (Pmode);
5503 rtx result_reg = gen_reg_rtx (V16QImode);
5504 rtx is_aligned_label = gen_label_rtx ();
5505 rtx into_loop_label = NULL_RTX;
5506 rtx loop_start_label = gen_label_rtx ();
5507 rtx temp;
5508 rtx len = gen_reg_rtx (QImode);
5509 rtx cond;
5511 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5512 emit_move_insn (str_idx_reg, const0_rtx);
5514 if (INTVAL (alignment) < 16)
5516 /* Check whether the address happens to be aligned properly so we
5517 can jump directly to the aligned loop. */
5518 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5519 str_addr_base_reg, GEN_INT (15)),
5520 const0_rtx, EQ, NULL_RTX,
5521 Pmode, 1, is_aligned_label);
5523 temp = gen_reg_rtx (Pmode);
5524 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5525 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5526 gcc_assert (REG_P (temp));
5527 highest_index_to_load_reg =
5528 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5529 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5530 gcc_assert (REG_P (highest_index_to_load_reg));
5531 emit_insn (gen_vllv16qi (str_reg,
5532 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5533 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5535 into_loop_label = gen_label_rtx ();
5536 s390_emit_jump (into_loop_label, NULL_RTX);
5537 emit_barrier ();
5540 emit_label (is_aligned_label);
5541 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5543 /* From this point on we only perform 16-byte aligned
5544 loads. */
5545 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5547 emit_label (loop_start_label);
5548 LABEL_NUSES (loop_start_label) = 1;
5550 /* Load 16 bytes of the string into VR. */
5551 emit_move_insn (str_reg,
5552 gen_rtx_MEM (V16QImode,
5553 gen_rtx_PLUS (Pmode, str_idx_reg,
5554 str_addr_base_reg)));
5555 if (into_loop_label != NULL_RTX)
5557 emit_label (into_loop_label);
5558 LABEL_NUSES (into_loop_label) = 1;
5561 /* Increment string index by 16 bytes. */
5562 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5563 str_idx_reg, 1, OPTAB_DIRECT);
5565 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5566 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5568 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5569 REG_BR_PROB, very_likely);
5570 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5572 /* If the string pointer wasn't aligned we have loaded less than 16
5573 bytes and the remaining bytes got filled with zeros (by vll).
5574 Now we have to check whether the resulting index lies within the
5575 bytes that are actually part of the string. */
5577 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5578 highest_index_to_load_reg);
5579 s390_load_address (highest_index_to_load_reg,
5580 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5581 const1_rtx));
5582 if (TARGET_64BIT)
5583 emit_insn (gen_movdicc (str_idx_reg, cond,
5584 highest_index_to_load_reg, str_idx_reg));
5585 else
5586 emit_insn (gen_movsicc (str_idx_reg, cond,
5587 highest_index_to_load_reg, str_idx_reg));
5589 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5590 very_unlikely);
5592 expand_binop (Pmode, add_optab, str_idx_reg,
5593 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5594 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5595 here. */
5596 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5597 convert_to_mode (Pmode, len, 1),
5598 target, 1, OPTAB_DIRECT);
5599 if (temp != target)
5600 emit_move_insn (target, temp);
5603 /* Expand conditional increment or decrement using alc/slb instructions.
5604 Should generate code setting DST to either SRC or SRC + INCREMENT,
5605 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5606 Returns true if successful, false otherwise.
5608 That makes it possible to implement some if-constructs without jumps e.g.:
5609 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5610 unsigned int a, b, c;
5611 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5612 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5613 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5614 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5616 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5617 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5618 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5619 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5620 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
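/* For illustration, a source-level pattern that typically reaches this
   expander (function and variable names are made up) is
     unsigned int f (unsigned int a, unsigned int b, unsigned int c)
     {
       return (a < b) ? c + 1 : c;
     }
   which if-conversion presents as an addcc of C with increment 1 under
   the comparison A < B.  */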
5622 bool
5623 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5624 rtx dst, rtx src, rtx increment)
5626 machine_mode cmp_mode;
5627 machine_mode cc_mode;
5628 rtx op_res;
5629 rtx insn;
5630 rtvec p;
5631 int ret;
5633 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5634 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5635 cmp_mode = SImode;
5636 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5637 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5638 cmp_mode = DImode;
5639 else
5640 return false;
5642 /* Try ADD LOGICAL WITH CARRY. */
5643 if (increment == const1_rtx)
5645 /* Determine CC mode to use. */
5646 if (cmp_code == EQ || cmp_code == NE)
5648 if (cmp_op1 != const0_rtx)
5650 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5651 NULL_RTX, 0, OPTAB_WIDEN);
5652 cmp_op1 = const0_rtx;
5655 cmp_code = cmp_code == EQ ? LEU : GTU;
5658 if (cmp_code == LTU || cmp_code == LEU)
5660 rtx tem = cmp_op0;
5661 cmp_op0 = cmp_op1;
5662 cmp_op1 = tem;
5663 cmp_code = swap_condition (cmp_code);
5666 switch (cmp_code)
5668 case GTU:
5669 cc_mode = CCUmode;
5670 break;
5672 case GEU:
5673 cc_mode = CCL3mode;
5674 break;
5676 default:
5677 return false;
5680 /* Emit comparison instruction pattern. */
5681 if (!register_operand (cmp_op0, cmp_mode))
5682 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5684 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5685 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5686 /* We use insn_invalid_p here to add clobbers if required. */
5687 ret = insn_invalid_p (emit_insn (insn), false);
5688 gcc_assert (!ret);
5690 /* Emit ALC instruction pattern. */
5691 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5692 gen_rtx_REG (cc_mode, CC_REGNUM),
5693 const0_rtx);
5695 if (src != const0_rtx)
5697 if (!register_operand (src, GET_MODE (dst)))
5698 src = force_reg (GET_MODE (dst), src);
5700 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5701 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5704 p = rtvec_alloc (2);
5705 RTVEC_ELT (p, 0) =
5706 gen_rtx_SET (dst, op_res);
5707 RTVEC_ELT (p, 1) =
5708 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5709 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5711 return true;
5714 /* Try SUBTRACT LOGICAL WITH BORROW. */
5715 if (increment == constm1_rtx)
5717 /* Determine CC mode to use. */
5718 if (cmp_code == EQ || cmp_code == NE)
5720 if (cmp_op1 != const0_rtx)
5722 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5723 NULL_RTX, 0, OPTAB_WIDEN);
5724 cmp_op1 = const0_rtx;
5727 cmp_code = cmp_code == EQ ? LEU : GTU;
5730 if (cmp_code == GTU || cmp_code == GEU)
5732 rtx tem = cmp_op0;
5733 cmp_op0 = cmp_op1;
5734 cmp_op1 = tem;
5735 cmp_code = swap_condition (cmp_code);
5738 switch (cmp_code)
5740 case LEU:
5741 cc_mode = CCUmode;
5742 break;
5744 case LTU:
5745 cc_mode = CCL3mode;
5746 break;
5748 default:
5749 return false;
5752 /* Emit comparison instruction pattern. */
5753 if (!register_operand (cmp_op0, cmp_mode))
5754 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5756 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5757 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5758 /* We use insn_invalid_p here to add clobbers if required. */
5759 ret = insn_invalid_p (emit_insn (insn), false);
5760 gcc_assert (!ret);
5762 /* Emit SLB instruction pattern. */
5763 if (!register_operand (src, GET_MODE (dst)))
5764 src = force_reg (GET_MODE (dst), src);
5766 op_res = gen_rtx_MINUS (GET_MODE (dst),
5767 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5768 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5769 gen_rtx_REG (cc_mode, CC_REGNUM),
5770 const0_rtx));
5771 p = rtvec_alloc (2);
5772 RTVEC_ELT (p, 0) =
5773 gen_rtx_SET (dst, op_res);
5774 RTVEC_ELT (p, 1) =
5775 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5776 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5778 return true;
5781 return false;
5784 /* Expand code for the insv template. Return true if successful. */
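/* For illustration, a bit-field store such as (names are made up)
     struct s { unsigned int f : 12; unsigned int rest : 20; } x;
     x.f = v;
   can reach this expander as a (set (zero_extract ...) ...) with OP1
   holding the field width and OP2 the starting bit position.  */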
5786 bool
5787 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
5789 int bitsize = INTVAL (op1);
5790 int bitpos = INTVAL (op2);
5791 machine_mode mode = GET_MODE (dest);
5792 machine_mode smode;
5793 int smode_bsize, mode_bsize;
5794 rtx op, clobber;
5796 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
5797 return false;
5799 /* Generate INSERT IMMEDIATE (IILL et al). */
5800 /* (set (ze (reg)) (const_int)). */
5801 if (TARGET_ZARCH
5802 && register_operand (dest, word_mode)
5803 && (bitpos % 16) == 0
5804 && (bitsize % 16) == 0
5805 && const_int_operand (src, VOIDmode))
5807 HOST_WIDE_INT val = INTVAL (src);
5808 int regpos = bitpos + bitsize;
5810 while (regpos > bitpos)
5812 machine_mode putmode;
5813 int putsize;
5815 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
5816 putmode = SImode;
5817 else
5818 putmode = HImode;
5820 putsize = GET_MODE_BITSIZE (putmode);
5821 regpos -= putsize;
5822 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5823 GEN_INT (putsize),
5824 GEN_INT (regpos)),
5825 gen_int_mode (val, putmode));
5826 val >>= putsize;
5828 gcc_assert (regpos == bitpos);
5829 return true;
5832 smode = smallest_mode_for_size (bitsize, MODE_INT);
5833 smode_bsize = GET_MODE_BITSIZE (smode);
5834 mode_bsize = GET_MODE_BITSIZE (mode);
5836 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
5837 if (bitpos == 0
5838 && (bitsize % BITS_PER_UNIT) == 0
5839 && MEM_P (dest)
5840 && (register_operand (src, word_mode)
5841 || const_int_operand (src, VOIDmode)))
5843 /* Emit standard pattern if possible. */
5844 if (smode_bsize == bitsize)
5846 emit_move_insn (adjust_address (dest, smode, 0),
5847 gen_lowpart (smode, src));
5848 return true;
5851 /* (set (ze (mem)) (const_int)). */
5852 else if (const_int_operand (src, VOIDmode))
5854 int size = bitsize / BITS_PER_UNIT;
5855 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
5856 BLKmode,
5857 UNITS_PER_WORD - size);
5859 dest = adjust_address (dest, BLKmode, 0);
5860 set_mem_size (dest, size);
5861 s390_expand_movmem (dest, src_mem, GEN_INT (size));
5862 return true;
5865 /* (set (ze (mem)) (reg)). */
5866 else if (register_operand (src, word_mode))
5868 if (bitsize <= 32)
5869 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
5870 const0_rtx), src);
5871 else
5873 /* Emit st,stcmh sequence. */
5874 int stcmh_width = bitsize - 32;
5875 int size = stcmh_width / BITS_PER_UNIT;
5877 emit_move_insn (adjust_address (dest, SImode, size),
5878 gen_lowpart (SImode, src));
5879 set_mem_size (dest, size);
5880 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5881 GEN_INT (stcmh_width),
5882 const0_rtx),
5883 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
5885 return true;
5889 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
5890 if ((bitpos % BITS_PER_UNIT) == 0
5891 && (bitsize % BITS_PER_UNIT) == 0
5892 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
5893 && MEM_P (src)
5894 && (mode == DImode || mode == SImode)
5895 && register_operand (dest, mode))
5897 /* Emit a strict_low_part pattern if possible. */
5898 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
5900 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
5901 op = gen_rtx_SET (op, gen_lowpart (smode, src));
5902 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5903 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
5904 return true;
5907 /* ??? There are more powerful versions of ICM that are not
5908 completely represented in the md file. */
5911 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
5912 if (TARGET_Z10 && (mode == DImode || mode == SImode))
5914 machine_mode mode_s = GET_MODE (src);
5916 if (mode_s == VOIDmode)
5918 /* For constant zero values the representation with AND
5919 appears to be folded in more situations than the (set
5920 (zero_extract) ...).
5921 We only do this when the start and end of the bitfield
5922 remain in the same SImode chunk. That way nihf or nilf
5923 can be used.
5924 The AND patterns might still generate a risbg for this. */
5925 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
5926 return false;
5927 else
5928 src = force_reg (mode, src);
5930 else if (mode_s != mode)
5932 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
5933 src = force_reg (mode_s, src);
5934 src = gen_lowpart (mode, src);
5937 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
5938 op = gen_rtx_SET (op, src);
5940 if (!TARGET_ZEC12)
5942 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5943 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
5945 emit_insn (op);
5947 return true;
5950 return false;
5953 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
5954 register that holds VAL of mode MODE shifted by COUNT bits. */
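/* Example (illustrative): for MODE == HImode and COUNT == 16 this
   returns a register holding (VAL & 0xffff) << 16, i.e. the halfword
   value positioned in the upper half of the SImode word.  */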
5956 static inline rtx
5957 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
5959 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
5960 NULL_RTX, 1, OPTAB_DIRECT);
5961 return expand_simple_binop (SImode, ASHIFT, val, count,
5962 NULL_RTX, 1, OPTAB_DIRECT);
5965 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
5966 the result in TARGET. */
5968 void
5969 s390_expand_vec_compare (rtx target, enum rtx_code cond,
5970 rtx cmp_op1, rtx cmp_op2)
5972 machine_mode mode = GET_MODE (target);
5973 bool neg_p = false, swap_p = false;
5974 rtx tmp;
5976 if (GET_MODE (cmp_op1) == V2DFmode)
5978 switch (cond)
5980 /* NE a != b -> !(a == b) */
5981 case NE: cond = EQ; neg_p = true; break;
5982 /* UNGT a u> b -> !(b >= a) */
5983 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
5984 /* UNGE a u>= b -> !(b > a) */
5985 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
5986 /* LE: a <= b -> b >= a */
5987 case LE: cond = GE; swap_p = true; break;
5988 /* UNLE: a u<= b -> !(a > b) */
5989 case UNLE: cond = GT; neg_p = true; break;
5990 /* LT: a < b -> b > a */
5991 case LT: cond = GT; swap_p = true; break;
5992 /* UNLT: a u< b -> !(a >= b) */
5993 case UNLT: cond = GE; neg_p = true; break;
5994 case UNEQ:
5995 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
5996 return;
5997 case LTGT:
5998 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
5999 return;
6000 case ORDERED:
6001 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6002 return;
6003 case UNORDERED:
6004 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6005 return;
6006 default: break;
6009 else
6011 switch (cond)
6013 /* NE: a != b -> !(a == b) */
6014 case NE: cond = EQ; neg_p = true; break;
6015 /* GE: a >= b -> !(b > a) */
6016 case GE: cond = GT; neg_p = true; swap_p = true; break;
6017 /* GEU: a >= b -> !(b > a) */
6018 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6019 /* LE: a <= b -> !(a > b) */
6020 case LE: cond = GT; neg_p = true; break;
6021 /* LEU: a <= b -> !(a > b) */
6022 case LEU: cond = GTU; neg_p = true; break;
6023 /* LT: a < b -> b > a */
6024 case LT: cond = GT; swap_p = true; break;
6025 /* LTU: a < b -> b > a */
6026 case LTU: cond = GTU; swap_p = true; break;
6027 default: break;
6031 if (swap_p)
6033 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6036 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6037 mode,
6038 cmp_op1, cmp_op2)));
6039 if (neg_p)
6040 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6043 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6044 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6045 elements in CMP1 and CMP2 fulfill the comparison. */
6046 void
6047 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6048 rtx cmp1, rtx cmp2, bool all_p)
6050 enum rtx_code new_code = code;
6051 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
6052 rtx tmp_reg = gen_reg_rtx (SImode);
6053 bool swap_p = false;
6055 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6057 switch (code)
6059 case EQ: cmp_mode = CCVEQmode; break;
6060 case NE: cmp_mode = CCVEQmode; break;
6061 case GT: cmp_mode = CCVHmode; break;
6062 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
6063 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
6064 case LE: cmp_mode = CCVHmode; new_code = LE; break;
6065 case GTU: cmp_mode = CCVHUmode; break;
6066 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
6067 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
6068 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
6069 default: gcc_unreachable ();
6071 scratch_mode = GET_MODE (cmp1);
6073 else if (GET_MODE (cmp1) == V2DFmode)
6075 switch (code)
6077 case EQ: cmp_mode = CCVEQmode; break;
6078 case NE: cmp_mode = CCVEQmode; break;
6079 case GT: cmp_mode = CCVFHmode; break;
6080 case GE: cmp_mode = CCVFHEmode; break;
6081 case UNLE: cmp_mode = CCVFHmode; break;
6082 case UNLT: cmp_mode = CCVFHEmode; break;
6083 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6084 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6085 default: gcc_unreachable ();
6087 scratch_mode = V2DImode;
6089 else
6090 gcc_unreachable ();
6092 if (!all_p)
6093 switch (cmp_mode)
6095 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6096 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6097 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6098 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6099 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6100 default: gcc_unreachable ();
6102 else
6103 /* The modes without ANY match the ALL modes. */
6104 full_cmp_mode = cmp_mode;
6106 if (swap_p)
6108 rtx tmp = cmp2;
6109 cmp2 = cmp1;
6110 cmp1 = tmp;
6113 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6114 gen_rtvec (2, gen_rtx_SET (
6115 gen_rtx_REG (cmp_mode, CC_REGNUM),
6116 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6117 gen_rtx_CLOBBER (VOIDmode,
6118 gen_rtx_SCRATCH (scratch_mode)))));
6119 emit_move_insn (target, const0_rtx);
6120 emit_move_insn (tmp_reg, const1_rtx);
6122 emit_move_insn (target,
6123 gen_rtx_IF_THEN_ELSE (SImode,
6124 gen_rtx_fmt_ee (new_code, VOIDmode,
6125 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6126 const0_rtx),
6127 target, tmp_reg));
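/* Informally, the RTL emitted above amounts to: a vector compare that sets
   CC_REGNUM in one of the CCV* modes (clobbering a vector scratch), two
   moves preloading TARGET with 0 and a temporary with 1, and a conditional
   move that selects between them based on the CC, leaving the scalar 0/1
   result in TARGET.  */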
6130 /* Generate a vector comparison expression loading either elements of
6131 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6132 and CMP_OP2. */
6134 void
6135 s390_expand_vcond (rtx target, rtx then, rtx els,
6136 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6138 rtx tmp;
6139 machine_mode result_mode;
6140 rtx result_target;
6142 machine_mode target_mode = GET_MODE (target);
6143 machine_mode cmp_mode = GET_MODE (cmp_op1);
6144 rtx op = (cond == LT) ? els : then;
6146 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6147 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6148 for short and byte (x >> 15 and x >> 7 respectively). */
6149 if ((cond == LT || cond == GE)
6150 && target_mode == cmp_mode
6151 && cmp_op2 == CONST0_RTX (cmp_mode)
6152 && op == CONST0_RTX (target_mode)
6153 && s390_vector_mode_supported_p (target_mode)
6154 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6156 rtx negop = (cond == LT) ? then : els;
6158 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6160 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6161 if (negop == CONST1_RTX (target_mode))
6163 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6164 GEN_INT (shift), target,
6165 1, OPTAB_DIRECT);
6166 if (res != target)
6167 emit_move_insn (target, res);
6168 return;
6171 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6172 else if (all_ones_operand (negop, target_mode))
6174 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6175 GEN_INT (shift), target,
6176 0, OPTAB_DIRECT);
6177 if (res != target)
6178 emit_move_insn (target, res);
6179 return;
6183 /* We always use an integral type vector to hold the comparison
6184 result. */
6185 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6186 result_target = gen_reg_rtx (result_mode);
6188 /* We allow vector immediates as comparison operands that
6189 can be handled by the optimization above but not by the
6190 following code. Hence, force them into registers here. */
6191 if (!REG_P (cmp_op1))
6192 cmp_op1 = force_reg (target_mode, cmp_op1);
6194 if (!REG_P (cmp_op2))
6195 cmp_op2 = force_reg (target_mode, cmp_op2);
6197 s390_expand_vec_compare (result_target, cond,
6198 cmp_op1, cmp_op2);
6200 /* If the results are supposed to be either -1 or 0 we are done
6201 since this is what our compare instructions generate anyway. */
6202 if (all_ones_operand (then, GET_MODE (then))
6203 && const0_operand (els, GET_MODE (els)))
6205 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6206 result_target, 0));
6207 return;
6210 /* Otherwise we will do a vsel afterwards. */
6211 /* This gets triggered e.g.
6212 with gcc.c-torture/compile/pr53410-1.c */
6213 if (!REG_P (then))
6214 then = force_reg (target_mode, then);
6216 if (!REG_P (els))
6217 els = force_reg (target_mode, els);
6219 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6220 result_target,
6221 CONST0_RTX (result_mode));
6223 /* We compared the result against zero above so we have to swap then
6224 and els here. */
6225 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6227 gcc_assert (target_mode == GET_MODE (then));
6228 emit_insn (gen_rtx_SET (target, tmp));
6231 /* Emit the RTX necessary to initialize the vector TARGET with values
6232 in VALS. */
6233 void
6234 s390_expand_vec_init (rtx target, rtx vals)
6236 machine_mode mode = GET_MODE (target);
6237 machine_mode inner_mode = GET_MODE_INNER (mode);
6238 int n_elts = GET_MODE_NUNITS (mode);
6239 bool all_same = true, all_regs = true, all_const_int = true;
6240 rtx x;
6241 int i;
6243 for (i = 0; i < n_elts; ++i)
6245 x = XVECEXP (vals, 0, i);
6247 if (!CONST_INT_P (x))
6248 all_const_int = false;
6250 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6251 all_same = false;
6253 if (!REG_P (x))
6254 all_regs = false;
6257 /* Use vector gen mask or vector gen byte mask if possible. */
6258 if (all_same && all_const_int
6259 && (XVECEXP (vals, 0, 0) == const0_rtx
6260 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6261 NULL, NULL)
6262 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6264 emit_insn (gen_rtx_SET (target,
6265 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6266 return;
6269 if (all_same)
6271 emit_insn (gen_rtx_SET (target,
6272 gen_rtx_VEC_DUPLICATE (mode,
6273 XVECEXP (vals, 0, 0))));
6274 return;
6277 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6279 /* Use vector load pair. */
6280 emit_insn (gen_rtx_SET (target,
6281 gen_rtx_VEC_CONCAT (mode,
6282 XVECEXP (vals, 0, 0),
6283 XVECEXP (vals, 0, 1))));
6284 return;
6287 /* We are about to set the vector elements one by one. Zero out the
6288 full register first in order to help the data flow framework to
6289 detect it as a full VR set. */
6290 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6292 /* Unfortunately the vec_init expander is not allowed to fail. So
6293 we have to implement the fallback ourselves. */
6294 for (i = 0; i < n_elts; i++)
6295 emit_insn (gen_rtx_SET (target,
6296 gen_rtx_UNSPEC (mode,
6297 gen_rtvec (3, XVECEXP (vals, 0, i),
6298 GEN_INT (i), target),
6299 UNSPEC_VEC_SET)));
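/* As a rough example: a V4SImode vec_init from four distinct pseudo
   registers matches none of the special cases above (gen mask / byte mask,
   duplicate, or vector load pair), so it ends up here: the register is
   zeroed and each element is then inserted individually via the
   UNSPEC_VEC_SET pattern.  */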
6302 /* Structure to hold the initial parameters for a compare_and_swap operation
6303 in HImode and QImode. */
6305 struct alignment_context
6307 rtx memsi; /* SI aligned memory location. */
6308 rtx shift; /* Bit offset with regard to lsb. */
6309 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6310 rtx modemaski; /* ~modemask */
6311 bool aligned; /* True if memory is aligned, false otherwise. */
6314 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6315 structure AC for transparent simplification, if the memory alignment is known
6316 to be at least 32 bits. MEM is the memory location for the actual operation
6317 and MODE its mode. */
6319 static void
6320 init_alignment_context (struct alignment_context *ac, rtx mem,
6321 machine_mode mode)
6323 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6324 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6326 if (ac->aligned)
6327 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6328 else
6330 /* Alignment is unknown. */
6331 rtx byteoffset, addr, align;
6333 /* Force the address into a register. */
6334 addr = force_reg (Pmode, XEXP (mem, 0));
6336 /* Align it to SImode. */
6337 align = expand_simple_binop (Pmode, AND, addr,
6338 GEN_INT (-GET_MODE_SIZE (SImode)),
6339 NULL_RTX, 1, OPTAB_DIRECT);
6340 /* Generate MEM. */
6341 ac->memsi = gen_rtx_MEM (SImode, align);
6342 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6343 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6344 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6346 /* Calculate shiftcount. */
6347 byteoffset = expand_simple_binop (Pmode, AND, addr,
6348 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6349 NULL_RTX, 1, OPTAB_DIRECT);
6350 /* As we already have some offset, evaluate the remaining distance. */
6351 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6352 NULL_RTX, 1, OPTAB_DIRECT);
6355 /* Shift is the byte count, but we need the bitcount. */
6356 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6357 NULL_RTX, 1, OPTAB_DIRECT);
6359 /* Calculate masks. */
6360 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6361 GEN_INT (GET_MODE_MASK (mode)),
6362 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6363 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6364 NULL_RTX, 1);
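/* A concrete illustration (assuming an HImode MEM whose address ADDR has
   unknown alignment): memsi becomes an SImode MEM at (ADDR & -4), shift is
   ((4 - 2) - (ADDR & 3)) * 8 bits, and for a word-aligned ADDR this gives
   shift = 16, modemask = 0xffff0000 and modemaski = 0x0000ffff; i.e. on
   this big-endian target the halfword occupies the upper half of its
   containing word.  */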
6367 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6368 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6369 perform the merge in SEQ2. */
6371 static rtx
6372 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6373 machine_mode mode, rtx val, rtx ins)
6375 rtx tmp;
6377 if (ac->aligned)
6379 start_sequence ();
6380 tmp = copy_to_mode_reg (SImode, val);
6381 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6382 const0_rtx, ins))
6384 *seq1 = NULL;
6385 *seq2 = get_insns ();
6386 end_sequence ();
6387 return tmp;
6389 end_sequence ();
6392 /* Failed to use insv. Generate a two part shift and mask. */
6393 start_sequence ();
6394 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6395 *seq1 = get_insns ();
6396 end_sequence ();
6398 start_sequence ();
6399 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6400 *seq2 = get_insns ();
6401 end_sequence ();
6403 return tmp;
6406 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6407 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6408 value to set if CMP == MEM. */
6410 void
6411 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6412 rtx cmp, rtx new_rtx, bool is_weak)
6414 struct alignment_context ac;
6415 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6416 rtx res = gen_reg_rtx (SImode);
6417 rtx_code_label *csloop = NULL, *csend = NULL;
6419 gcc_assert (MEM_P (mem));
6421 init_alignment_context (&ac, mem, mode);
6423 /* Load full word. Subsequent loads are performed by CS. */
6424 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6425 NULL_RTX, 1, OPTAB_DIRECT);
6427 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6428 possible, we try to use insv to make this happen efficiently. If
6429 that fails we'll generate code both inside and outside the loop. */
6430 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6431 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6433 if (seq0)
6434 emit_insn (seq0);
6435 if (seq1)
6436 emit_insn (seq1);
6438 /* Start CS loop. */
6439 if (!is_weak)
6441 /* Begin assuming success. */
6442 emit_move_insn (btarget, const1_rtx);
6444 csloop = gen_label_rtx ();
6445 csend = gen_label_rtx ();
6446 emit_label (csloop);
6449 /* val = "<mem>00..0<mem>"
6450 * cmp = "00..0<cmp>00..0"
6451 * new = "00..0<new>00..0"
6454 emit_insn (seq2);
6455 emit_insn (seq3);
6457 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6458 if (is_weak)
6459 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6460 else
6462 rtx tmp;
6464 /* Jump to end if we're done (likely?). */
6465 s390_emit_jump (csend, cc);
6467 /* Check for changes outside the mode, and loop internally if so.
6468 Arrange the moves so that the compare is adjacent to the
6469 branch so that we can generate CRJ. */
6470 tmp = copy_to_reg (val);
6471 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6472 1, OPTAB_DIRECT);
6473 cc = s390_emit_compare (NE, val, tmp);
6474 s390_emit_jump (csloop, cc);
6476 /* Failed. */
6477 emit_move_insn (btarget, const0_rtx);
6478 emit_label (csend);
6481 /* Return the correct part of the bitfield. */
6482 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6483 NULL_RTX, 1, OPTAB_DIRECT), 1);
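/* Sketch of the overall sequence emitted for a strong compare-and-swap on
   a halfword (informal): the containing word is loaded and masked to VAL,
   CMP and NEW_RTX are merged into copies of it (CMPV/NEWV), then a CS loop
   runs: on success the branch to CSEND is taken with BTARGET already 1; on
   failure the bits outside the halfword are compared against the previous
   contents and, if they changed, the loop retries with the updated word;
   otherwise BTARGET is set to 0.  Finally the halfword is extracted from
   RES by shifting.  */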
6486 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6487 and VAL the value to play with. If AFTER is true then store the value
6488 MEM holds after the operation, if AFTER is false then store the value MEM
6489 holds before the operation. If TARGET is zero then discard that value, else
6490 store it to TARGET. */
6492 void
6493 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6494 rtx target, rtx mem, rtx val, bool after)
6496 struct alignment_context ac;
6497 rtx cmp;
6498 rtx new_rtx = gen_reg_rtx (SImode);
6499 rtx orig = gen_reg_rtx (SImode);
6500 rtx_code_label *csloop = gen_label_rtx ();
6502 gcc_assert (!target || register_operand (target, VOIDmode));
6503 gcc_assert (MEM_P (mem));
6505 init_alignment_context (&ac, mem, mode);
6507 /* Shift val to the correct bit positions.
6508 Preserve "icm", but prevent "ex icm". */
6509 if (!(ac.aligned && code == SET && MEM_P (val)))
6510 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6512 /* Further preparation insns. */
6513 if (code == PLUS || code == MINUS)
6514 emit_move_insn (orig, val);
6515 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6516 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6517 NULL_RTX, 1, OPTAB_DIRECT);
6519 /* Load full word. Subsequent loads are performed by CS. */
6520 cmp = force_reg (SImode, ac.memsi);
6522 /* Start CS loop. */
6523 emit_label (csloop);
6524 emit_move_insn (new_rtx, cmp);
6526 /* Patch new with val at correct position. */
6527 switch (code)
6529 case PLUS:
6530 case MINUS:
6531 val = expand_simple_binop (SImode, code, new_rtx, orig,
6532 NULL_RTX, 1, OPTAB_DIRECT);
6533 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6534 NULL_RTX, 1, OPTAB_DIRECT);
6535 /* FALLTHRU */
6536 case SET:
6537 if (ac.aligned && MEM_P (val))
6538 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6539 0, 0, SImode, val, false);
6540 else
6542 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6543 NULL_RTX, 1, OPTAB_DIRECT);
6544 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6545 NULL_RTX, 1, OPTAB_DIRECT);
6547 break;
6548 case AND:
6549 case IOR:
6550 case XOR:
6551 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6552 NULL_RTX, 1, OPTAB_DIRECT);
6553 break;
6554 case MULT: /* NAND */
6555 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6556 NULL_RTX, 1, OPTAB_DIRECT);
6557 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6558 NULL_RTX, 1, OPTAB_DIRECT);
6559 break;
6560 default:
6561 gcc_unreachable ();
6564 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6565 ac.memsi, cmp, new_rtx));
6567 /* Return the correct part of the bitfield. */
6568 if (target)
6569 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6570 after ? new_rtx : cmp, ac.shift,
6571 NULL_RTX, 1, OPTAB_DIRECT), 1);
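/* Rough shape of the loop emitted for, say, a QImode fetch-and-add
   (ORIG below is VAL already masked and shifted into position):

       cmp = *memsi;                       load the containing word once
   loop:
       new = (cmp & ~modemask)
             | ((cmp + orig) & modemask);  patch the byte into the word
       compare-and-swap cmp/new on memsi;  branch back to 'loop' on failure
       result = (after ? new : cmp) >> shift, truncated to QImode

   The other codes differ only in how NEW is computed from CMP and VAL.  */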
6574 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6575 We need to emit DTP-relative relocations. */
6577 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6579 static void
6580 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6582 switch (size)
6584 case 4:
6585 fputs ("\t.long\t", file);
6586 break;
6587 case 8:
6588 fputs ("\t.quad\t", file);
6589 break;
6590 default:
6591 gcc_unreachable ();
6593 output_addr_const (file, x);
6594 fputs ("@DTPOFF", file);
6597 /* Return the proper mode for REGNO being represented in the dwarf
6598 unwind table. */
6599 machine_mode
6600 s390_dwarf_frame_reg_mode (int regno)
6602 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6604 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6605 if (GENERAL_REGNO_P (regno))
6606 save_mode = Pmode;
6608 /* The rightmost 64 bits of vector registers are call-clobbered. */
6609 if (GET_MODE_SIZE (save_mode) > 8)
6610 save_mode = DImode;
6612 return save_mode;
6615 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6616 /* Implement TARGET_MANGLE_TYPE. */
6618 static const char *
6619 s390_mangle_type (const_tree type)
6621 type = TYPE_MAIN_VARIANT (type);
6623 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6624 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6625 return NULL;
6627 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6628 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6629 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6630 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6632 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6633 && TARGET_LONG_DOUBLE_128)
6634 return "g";
6636 /* For all other types, use normal C++ mangling. */
6637 return NULL;
6639 #endif
6641 /* In the name of slightly smaller debug output, and to cater to
6642 general assembler lossage, recognize various UNSPEC sequences
6643 and turn them back into a direct symbol reference. */
6645 static rtx
6646 s390_delegitimize_address (rtx orig_x)
6648 rtx x, y;
6650 orig_x = delegitimize_mem_from_attrs (orig_x);
6651 x = orig_x;
6653 /* Extract the symbol ref from:
6654 (plus:SI (reg:SI 12 %r12)
6655 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6656 UNSPEC_GOTOFF/PLTOFF)))
6658 (plus:SI (reg:SI 12 %r12)
6659 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6660 UNSPEC_GOTOFF/PLTOFF)
6661 (const_int 4 [0x4])))) */
6662 if (GET_CODE (x) == PLUS
6663 && REG_P (XEXP (x, 0))
6664 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6665 && GET_CODE (XEXP (x, 1)) == CONST)
6667 HOST_WIDE_INT offset = 0;
6669 /* The const operand. */
6670 y = XEXP (XEXP (x, 1), 0);
6672 if (GET_CODE (y) == PLUS
6673 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6675 offset = INTVAL (XEXP (y, 1));
6676 y = XEXP (y, 0);
6679 if (GET_CODE (y) == UNSPEC
6680 && (XINT (y, 1) == UNSPEC_GOTOFF
6681 || XINT (y, 1) == UNSPEC_PLTOFF))
6682 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6685 if (GET_CODE (x) != MEM)
6686 return orig_x;
6688 x = XEXP (x, 0);
6689 if (GET_CODE (x) == PLUS
6690 && GET_CODE (XEXP (x, 1)) == CONST
6691 && GET_CODE (XEXP (x, 0)) == REG
6692 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6694 y = XEXP (XEXP (x, 1), 0);
6695 if (GET_CODE (y) == UNSPEC
6696 && XINT (y, 1) == UNSPEC_GOT)
6697 y = XVECEXP (y, 0, 0);
6698 else
6699 return orig_x;
6701 else if (GET_CODE (x) == CONST)
6703 /* Extract the symbol ref from:
6704 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6705 UNSPEC_PLT/GOTENT))) */
6707 y = XEXP (x, 0);
6708 if (GET_CODE (y) == UNSPEC
6709 && (XINT (y, 1) == UNSPEC_GOTENT
6710 || XINT (y, 1) == UNSPEC_PLT))
6711 y = XVECEXP (y, 0, 0);
6712 else
6713 return orig_x;
6715 else
6716 return orig_x;
6718 if (GET_MODE (orig_x) != Pmode)
6720 if (GET_MODE (orig_x) == BLKmode)
6721 return orig_x;
6722 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6723 if (y == NULL_RTX)
6724 return orig_x;
6726 return y;
6729 /* Output operand OP to stdio stream FILE.
6730 OP is an address (register + offset) which is not used to address data;
6731 instead the rightmost bits are interpreted as the value. */
6733 static void
6734 print_shift_count_operand (FILE *file, rtx op)
6736 HOST_WIDE_INT offset;
6737 rtx base;
6739 /* Extract base register and offset. */
6740 if (!s390_decompose_shift_count (op, &base, &offset))
6741 gcc_unreachable ();
6743 /* Sanity check. */
6744 if (base)
6746 gcc_assert (GET_CODE (base) == REG);
6747 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6748 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6751 /* Offsets are restricted to twelve bits. */
6752 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6753 if (base)
6754 fprintf (file, "(%s)", reg_names[REGNO (base)]);
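/* For example, OP == (plus:DI (reg %r2) (const_int 7)) prints "7(%r2)",
   while a plain (const_int 3) prints just "3".  */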
6757 /* Assigns the number of NOP halfwords to be emitted before and after the
6758 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6759 If hotpatching is disabled for the function, the values are set to zero.
6762 static void
6763 s390_function_num_hotpatch_hw (tree decl,
6764 int *hw_before,
6765 int *hw_after)
6767 tree attr;
6769 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6771 /* Handle the arguments of the hotpatch attribute. The values
6772 specified via attribute might override the cmdline argument
6773 values. */
6774 if (attr)
6776 tree args = TREE_VALUE (attr);
6778 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6779 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6781 else
6783 /* Use the values specified by the cmdline arguments. */
6784 *hw_before = s390_hotpatch_hw_before_label;
6785 *hw_after = s390_hotpatch_hw_after_label;
6789 /* Write the current .machine and .machinemode specification to the assembler
6790 file. */
6792 #ifdef HAVE_AS_MACHINE_MACHINEMODE
6793 static void
6794 s390_asm_output_machine_for_arch (FILE *asm_out_file)
6796 fprintf (asm_out_file, "\t.machinemode %s\n",
6797 (TARGET_ZARCH) ? "zarch" : "esa");
6798 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
6799 if (S390_USE_ARCHITECTURE_MODIFIERS)
6801 int cpu_flags;
6803 cpu_flags = processor_flags_table[(int) s390_arch];
6804 if (TARGET_HTM && !(cpu_flags & PF_TX))
6805 fprintf (asm_out_file, "+htm");
6806 else if (!TARGET_HTM && (cpu_flags & PF_TX))
6807 fprintf (asm_out_file, "+nohtm");
6808 if (TARGET_VX && !(cpu_flags & PF_VX))
6809 fprintf (asm_out_file, "+vx");
6810 else if (!TARGET_VX && (cpu_flags & PF_VX))
6811 fprintf (asm_out_file, "+novx");
6813 fprintf (asm_out_file, "\"\n");
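/* For example, when compiling for zEC12 while -mvx and -mno-htm are in
   effect, this might emit something like:

       .machinemode zarch
       .machine "zEC12+nohtm+vx"

   The exact modifiers depend on how the selected flags differ from the
   defaults of the chosen architecture level.  */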
6816 /* Write an extra function header before the very start of the function. */
6818 void
6819 s390_asm_output_function_prefix (FILE *asm_out_file,
6820 const char *fnname ATTRIBUTE_UNUSED)
6822 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
6823 return;
6824 /* Since only the function-specific options are saved, and not an indication of
6825 which options were explicitly set, it's too much work here to figure out which options
6826 have actually changed. Thus, generate .machine and .machinemode whenever a
6827 function has the target attribute or pragma. */
6828 fprintf (asm_out_file, "\t.machinemode push\n");
6829 fprintf (asm_out_file, "\t.machine push\n");
6830 s390_asm_output_machine_for_arch (asm_out_file);
6833 /* Write an extra function footer after the very end of the function. */
6835 void
6836 s390_asm_declare_function_size (FILE *asm_out_file,
6837 const char *fnname, tree decl)
6839 if (!flag_inhibit_size_directive)
6840 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
6841 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
6842 return;
6843 fprintf (asm_out_file, "\t.machine pop\n");
6844 fprintf (asm_out_file, "\t.machinemode pop\n");
6846 #endif
6848 /* Write the extra assembler code needed to declare a function properly. */
6850 void
6851 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
6852 tree decl)
6854 int hw_before, hw_after;
6856 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
6857 if (hw_before > 0)
6859 unsigned int function_alignment;
6860 int i;
6862 /* Add a trampoline code area before the function label and initialize it
6863 with two-byte nop instructions. This area can be overwritten with code
6864 that jumps to a patched version of the function. */
6865 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
6866 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
6867 hw_before);
6868 for (i = 1; i < hw_before; i++)
6869 fputs ("\tnopr\t%r7\n", asm_out_file);
6871 /* Note: The function label must be aligned so that (a) the bytes of the
6872 following nop do not cross a cacheline boundary, and (b) a jump address
6873 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
6874 stored directly before the label without crossing a cacheline
6875 boundary. All this is necessary to make sure the trampoline code can
6876 be changed atomically.
6877 This alignment is done automatically using FUNCTION_BOUNDARY, but
6878 if there are NOPs before the function label, the alignment is placed
6879 before them. So it is necessary to duplicate the alignment after the
6880 NOPs. */
6881 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
6882 if (! DECL_USER_ALIGN (decl))
6883 function_alignment = MAX (function_alignment,
6884 (unsigned int) align_functions);
6885 fputs ("\t# alignment for hotpatch\n", asm_out_file);
6886 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
6889 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
6891 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
6892 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
6893 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
6894 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
6895 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
6896 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
6897 s390_warn_framesize);
6898 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
6899 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
6900 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
6901 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
6902 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
6903 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
6904 TARGET_PACKED_STACK);
6905 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
6906 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
6907 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
6908 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
6909 s390_warn_dynamicstack_p);
6911 ASM_OUTPUT_LABEL (asm_out_file, fname);
6912 if (hw_after > 0)
6913 asm_fprintf (asm_out_file,
6914 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
6915 hw_after);
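/* With e.g. __attribute__ ((hotpatch (2, 1))) on a function foo, the output
   of this hook looks roughly like:

       nopr    %r7     # pre-label NOPs for hotpatch (2 halfwords)
       nopr    %r7
       # alignment for hotpatch
       .align  8
   foo:
       # post-label NOPs for hotpatch (1 halfwords)

   Only the explanatory comment for the post-label NOPs is printed here.  */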
6918 /* Output machine-dependent UNSPECs occurring in address constant X
6919 in assembler syntax to stdio stream FILE. Returns true if the
6920 constant X could be recognized, false otherwise. */
6922 static bool
6923 s390_output_addr_const_extra (FILE *file, rtx x)
6925 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
6926 switch (XINT (x, 1))
6928 case UNSPEC_GOTENT:
6929 output_addr_const (file, XVECEXP (x, 0, 0));
6930 fprintf (file, "@GOTENT");
6931 return true;
6932 case UNSPEC_GOT:
6933 output_addr_const (file, XVECEXP (x, 0, 0));
6934 fprintf (file, "@GOT");
6935 return true;
6936 case UNSPEC_GOTOFF:
6937 output_addr_const (file, XVECEXP (x, 0, 0));
6938 fprintf (file, "@GOTOFF");
6939 return true;
6940 case UNSPEC_PLT:
6941 output_addr_const (file, XVECEXP (x, 0, 0));
6942 fprintf (file, "@PLT");
6943 return true;
6944 case UNSPEC_PLTOFF:
6945 output_addr_const (file, XVECEXP (x, 0, 0));
6946 fprintf (file, "@PLTOFF");
6947 return true;
6948 case UNSPEC_TLSGD:
6949 output_addr_const (file, XVECEXP (x, 0, 0));
6950 fprintf (file, "@TLSGD");
6951 return true;
6952 case UNSPEC_TLSLDM:
6953 assemble_name (file, get_some_local_dynamic_name ());
6954 fprintf (file, "@TLSLDM");
6955 return true;
6956 case UNSPEC_DTPOFF:
6957 output_addr_const (file, XVECEXP (x, 0, 0));
6958 fprintf (file, "@DTPOFF");
6959 return true;
6960 case UNSPEC_NTPOFF:
6961 output_addr_const (file, XVECEXP (x, 0, 0));
6962 fprintf (file, "@NTPOFF");
6963 return true;
6964 case UNSPEC_GOTNTPOFF:
6965 output_addr_const (file, XVECEXP (x, 0, 0));
6966 fprintf (file, "@GOTNTPOFF");
6967 return true;
6968 case UNSPEC_INDNTPOFF:
6969 output_addr_const (file, XVECEXP (x, 0, 0));
6970 fprintf (file, "@INDNTPOFF");
6971 return true;
6974 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
6975 switch (XINT (x, 1))
6977 case UNSPEC_POOL_OFFSET:
6978 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
6979 output_addr_const (file, x);
6980 return true;
6982 return false;
6985 /* Output address operand ADDR in assembler syntax to
6986 stdio stream FILE. */
6988 void
6989 print_operand_address (FILE *file, rtx addr)
6991 struct s390_address ad;
6993 if (s390_loadrelative_operand_p (addr, NULL, NULL))
6995 if (!TARGET_Z10)
6997 output_operand_lossage ("symbolic memory references are "
6998 "only supported on z10 or later");
6999 return;
7001 output_addr_const (file, addr);
7002 return;
7005 if (!s390_decompose_address (addr, &ad)
7006 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7007 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7008 output_operand_lossage ("cannot decompose address");
7010 if (ad.disp)
7011 output_addr_const (file, ad.disp);
7012 else
7013 fprintf (file, "0");
7015 if (ad.base && ad.indx)
7016 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7017 reg_names[REGNO (ad.base)]);
7018 else if (ad.base)
7019 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7022 /* Output operand X in assembler syntax to stdio stream FILE.
7023 CODE specified the format flag. The following format flags
7024 are recognized:
7026 'C': print opcode suffix for branch condition.
7027 'D': print opcode suffix for inverse branch condition.
7028 'E': print opcode suffix for branch on index instruction.
7029 'G': print the size of the operand in bytes.
7030 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7031 'M': print the second word of a TImode operand.
7032 'N': print the second word of a DImode operand.
7033 'O': print only the displacement of a memory reference or address.
7034 'R': print only the base register of a memory reference or address.
7035 'S': print S-type memory reference (base+displacement).
7036 'Y': print shift count operand.
7038 'b': print integer X as if it's an unsigned byte.
7039 'c': print integer X as if it's a signed byte.
7040 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7041 'f': "end" contiguous bitmask X in SImode.
7042 'h': print integer X as if it's a signed halfword.
7043 'i': print the first nonzero HImode part of X.
7044 'j': print the first HImode part unequal to -1 of X.
7045 'k': print the first nonzero SImode part of X.
7046 'm': print the first SImode part unequal to -1 of X.
7047 'o': print integer X as if it's an unsigned 32-bit word.
7048 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7049 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7050 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7051 'x': print integer X as if it's an unsigned halfword.
7052 'v': print register number as vector register (v1 instead of f1).
7055 void
7056 print_operand (FILE *file, rtx x, int code)
7058 HOST_WIDE_INT ival;
7060 switch (code)
7062 case 'C':
7063 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7064 return;
7066 case 'D':
7067 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7068 return;
7070 case 'E':
7071 if (GET_CODE (x) == LE)
7072 fprintf (file, "l");
7073 else if (GET_CODE (x) == GT)
7074 fprintf (file, "h");
7075 else
7076 output_operand_lossage ("invalid comparison operator "
7077 "for 'E' output modifier");
7078 return;
7080 case 'J':
7081 if (GET_CODE (x) == SYMBOL_REF)
7083 fprintf (file, "%s", ":tls_load:");
7084 output_addr_const (file, x);
7086 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7088 fprintf (file, "%s", ":tls_gdcall:");
7089 output_addr_const (file, XVECEXP (x, 0, 0));
7091 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7093 fprintf (file, "%s", ":tls_ldcall:");
7094 const char *name = get_some_local_dynamic_name ();
7095 gcc_assert (name);
7096 assemble_name (file, name);
7098 else
7099 output_operand_lossage ("invalid reference for 'J' output modifier");
7100 return;
7102 case 'G':
7103 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7104 return;
7106 case 'O':
7108 struct s390_address ad;
7109 int ret;
7111 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7113 if (!ret
7114 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7115 || ad.indx)
7117 output_operand_lossage ("invalid address for 'O' output modifier");
7118 return;
7121 if (ad.disp)
7122 output_addr_const (file, ad.disp);
7123 else
7124 fprintf (file, "0");
7126 return;
7128 case 'R':
7130 struct s390_address ad;
7131 int ret;
7133 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7135 if (!ret
7136 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7137 || ad.indx)
7139 output_operand_lossage ("invalid address for 'R' output modifier");
7140 return;
7143 if (ad.base)
7144 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7145 else
7146 fprintf (file, "0");
7148 return;
7150 case 'S':
7152 struct s390_address ad;
7153 int ret;
7155 if (!MEM_P (x))
7157 output_operand_lossage ("memory reference expected for "
7158 "'S' output modifier");
7159 return;
7161 ret = s390_decompose_address (XEXP (x, 0), &ad);
7163 if (!ret
7164 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7165 || ad.indx)
7167 output_operand_lossage ("invalid address for 'S' output modifier");
7168 return;
7171 if (ad.disp)
7172 output_addr_const (file, ad.disp);
7173 else
7174 fprintf (file, "0");
7176 if (ad.base)
7177 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7179 return;
7181 case 'N':
7182 if (GET_CODE (x) == REG)
7183 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7184 else if (GET_CODE (x) == MEM)
7185 x = change_address (x, VOIDmode,
7186 plus_constant (Pmode, XEXP (x, 0), 4));
7187 else
7188 output_operand_lossage ("register or memory expression expected "
7189 "for 'N' output modifier");
7190 break;
7192 case 'M':
7193 if (GET_CODE (x) == REG)
7194 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7195 else if (GET_CODE (x) == MEM)
7196 x = change_address (x, VOIDmode,
7197 plus_constant (Pmode, XEXP (x, 0), 8));
7198 else
7199 output_operand_lossage ("register or memory expression expected "
7200 "for 'M' output modifier");
7201 break;
7203 case 'Y':
7204 print_shift_count_operand (file, x);
7205 return;
7208 switch (GET_CODE (x))
7210 case REG:
7211 /* Print FP regs as fx instead of vx when they are accessed
7212 through non-vector mode. */
7213 if (code == 'v'
7214 || VECTOR_NOFP_REG_P (x)
7215 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7216 || (VECTOR_REG_P (x)
7217 && (GET_MODE_SIZE (GET_MODE (x)) /
7218 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7219 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7220 else
7221 fprintf (file, "%s", reg_names[REGNO (x)]);
7222 break;
7224 case MEM:
7225 output_address (GET_MODE (x), XEXP (x, 0));
7226 break;
7228 case CONST:
7229 case CODE_LABEL:
7230 case LABEL_REF:
7231 case SYMBOL_REF:
7232 output_addr_const (file, x);
7233 break;
7235 case CONST_INT:
7236 ival = INTVAL (x);
7237 switch (code)
7239 case 0:
7240 break;
7241 case 'b':
7242 ival &= 0xff;
7243 break;
7244 case 'c':
7245 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7246 break;
7247 case 'x':
7248 ival &= 0xffff;
7249 break;
7250 case 'h':
7251 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7252 break;
7253 case 'i':
7254 ival = s390_extract_part (x, HImode, 0);
7255 break;
7256 case 'j':
7257 ival = s390_extract_part (x, HImode, -1);
7258 break;
7259 case 'k':
7260 ival = s390_extract_part (x, SImode, 0);
7261 break;
7262 case 'm':
7263 ival = s390_extract_part (x, SImode, -1);
7264 break;
7265 case 'o':
7266 ival &= 0xffffffff;
7267 break;
7268 case 'e': case 'f':
7269 case 's': case 't':
7271 int pos, len;
7272 bool ok;
7274 len = (code == 's' || code == 'e' ? 64 : 32);
7275 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7276 gcc_assert (ok);
7277 if (code == 's' || code == 't')
7278 ival = 64 - pos - len;
7279 else
7280 ival = 64 - 1 - pos;
7282 break;
7283 default:
7284 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7286 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7287 break;
7289 case CONST_WIDE_INT:
7290 if (code == 'b')
7291 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7292 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7293 else if (code == 'x')
7294 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7295 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7296 else if (code == 'h')
7297 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7298 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7299 else
7301 if (code == 0)
7302 output_operand_lossage ("invalid constant - try using "
7303 "an output modifier");
7304 else
7305 output_operand_lossage ("invalid constant for output modifier '%c'",
7306 code);
7308 break;
7309 case CONST_VECTOR:
7310 switch (code)
7312 case 'h':
7313 gcc_assert (const_vec_duplicate_p (x));
7314 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7315 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7316 break;
7317 case 'e':
7318 case 's':
7320 int start, stop, inner_len;
7321 bool ok;
7323 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7324 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7325 gcc_assert (ok);
7326 if (code == 's' || code == 't')
7327 ival = inner_len - stop - 1;
7328 else
7329 ival = inner_len - start - 1;
7330 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7332 break;
7333 case 't':
7335 unsigned mask;
7336 bool ok = s390_bytemask_vector_p (x, &mask);
7337 gcc_assert (ok);
7338 fprintf (file, "%u", mask);
7340 break;
7342 default:
7343 output_operand_lossage ("invalid constant vector for output "
7344 "modifier '%c'", code);
7346 break;
7348 default:
7349 if (code == 0)
7350 output_operand_lossage ("invalid expression - try using "
7351 "an output modifier");
7352 else
7353 output_operand_lossage ("invalid expression for output "
7354 "modifier '%c'", code);
7355 break;
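/* A few concrete examples for the CONST_INT modifiers: with
   X == (const_int -1), 'b' prints 255, 'x' prints 65535, 'o' prints
   4294967295, while 'c' and 'h' both print -1 (sign-extended byte and
   halfword respectively).  */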
7359 /* Target hook for assembling integer objects. We need to define it
7360 here to work around a bug in some versions of GAS, which couldn't
7361 handle values smaller than INT_MIN when printed in decimal. */
7363 static bool
7364 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7366 if (size == 8 && aligned_p
7367 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7369 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7370 INTVAL (x));
7371 return true;
7373 return default_assemble_integer (x, size, aligned_p);
7376 /* Returns true if register REGNO is used for forming
7377 a memory address in expression X. */
7379 static bool
7380 reg_used_in_mem_p (int regno, rtx x)
7382 enum rtx_code code = GET_CODE (x);
7383 int i, j;
7384 const char *fmt;
7386 if (code == MEM)
7388 if (refers_to_regno_p (regno, XEXP (x, 0)))
7389 return true;
7391 else if (code == SET
7392 && GET_CODE (SET_DEST (x)) == PC)
7394 if (refers_to_regno_p (regno, SET_SRC (x)))
7395 return true;
7398 fmt = GET_RTX_FORMAT (code);
7399 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7401 if (fmt[i] == 'e'
7402 && reg_used_in_mem_p (regno, XEXP (x, i)))
7403 return true;
7405 else if (fmt[i] == 'E')
7406 for (j = 0; j < XVECLEN (x, i); j++)
7407 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7408 return true;
7410 return false;
7413 /* Returns true if expression DEP_RTX sets an address register
7414 used by instruction INSN to address memory. */
7416 static bool
7417 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7419 rtx target, pat;
7421 if (NONJUMP_INSN_P (dep_rtx))
7422 dep_rtx = PATTERN (dep_rtx);
7424 if (GET_CODE (dep_rtx) == SET)
7426 target = SET_DEST (dep_rtx);
7427 if (GET_CODE (target) == STRICT_LOW_PART)
7428 target = XEXP (target, 0);
7429 while (GET_CODE (target) == SUBREG)
7430 target = SUBREG_REG (target);
7432 if (GET_CODE (target) == REG)
7434 int regno = REGNO (target);
7436 if (s390_safe_attr_type (insn) == TYPE_LA)
7438 pat = PATTERN (insn);
7439 if (GET_CODE (pat) == PARALLEL)
7441 gcc_assert (XVECLEN (pat, 0) == 2);
7442 pat = XVECEXP (pat, 0, 0);
7444 gcc_assert (GET_CODE (pat) == SET);
7445 return refers_to_regno_p (regno, SET_SRC (pat));
7447 else if (get_attr_atype (insn) == ATYPE_AGEN)
7448 return reg_used_in_mem_p (regno, PATTERN (insn));
7451 return false;
7454 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
7457 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7459 rtx dep_rtx = PATTERN (dep_insn);
7460 int i;
7462 if (GET_CODE (dep_rtx) == SET
7463 && addr_generation_dependency_p (dep_rtx, insn))
7464 return 1;
7465 else if (GET_CODE (dep_rtx) == PARALLEL)
7467 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7469 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7470 return 1;
7473 return 0;
7477 /* A C statement (sans semicolon) to update the integer scheduling priority
7478 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7479 reduce the priority to execute INSN later. Do not define this macro if
7480 you do not need to adjust the scheduling priorities of insns.
7482 A STD instruction should be scheduled earlier,
7483 in order to use the bypass. */
7484 static int
7485 s390_adjust_priority (rtx_insn *insn, int priority)
7487 if (! INSN_P (insn))
7488 return priority;
7490 if (s390_tune <= PROCESSOR_2064_Z900)
7491 return priority;
7493 switch (s390_safe_attr_type (insn))
7495 case TYPE_FSTOREDF:
7496 case TYPE_FSTORESF:
7497 priority = priority << 3;
7498 break;
7499 case TYPE_STORE:
7500 case TYPE_STM:
7501 priority = priority << 1;
7502 break;
7503 default:
7504 break;
7506 return priority;
7510 /* The number of instructions that can be issued per cycle. */
7512 static int
7513 s390_issue_rate (void)
7515 switch (s390_tune)
7517 case PROCESSOR_2084_Z990:
7518 case PROCESSOR_2094_Z9_109:
7519 case PROCESSOR_2094_Z9_EC:
7520 case PROCESSOR_2817_Z196:
7521 return 3;
7522 case PROCESSOR_2097_Z10:
7523 return 2;
7524 case PROCESSOR_9672_G5:
7525 case PROCESSOR_9672_G6:
7526 case PROCESSOR_2064_Z900:
7527 /* Starting with EC12 we use the sched_reorder hook to take care
7528 of instruction dispatch constraints. The algorithm only
7529 picks the best instruction and assumes only a single
7530 instruction gets issued per cycle. */
7531 case PROCESSOR_2827_ZEC12:
7532 case PROCESSOR_2964_Z13:
7533 default:
7534 return 1;
7538 static int
7539 s390_first_cycle_multipass_dfa_lookahead (void)
7541 return 4;
7544 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7545 Fix up MEMs as required. */
7547 static void
7548 annotate_constant_pool_refs (rtx *x)
7550 int i, j;
7551 const char *fmt;
7553 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7554 || !CONSTANT_POOL_ADDRESS_P (*x));
7556 /* Literal pool references can only occur inside a MEM ... */
7557 if (GET_CODE (*x) == MEM)
7559 rtx memref = XEXP (*x, 0);
7561 if (GET_CODE (memref) == SYMBOL_REF
7562 && CONSTANT_POOL_ADDRESS_P (memref))
7564 rtx base = cfun->machine->base_reg;
7565 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7566 UNSPEC_LTREF);
7568 *x = replace_equiv_address (*x, addr);
7569 return;
7572 if (GET_CODE (memref) == CONST
7573 && GET_CODE (XEXP (memref, 0)) == PLUS
7574 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7575 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7576 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7578 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7579 rtx sym = XEXP (XEXP (memref, 0), 0);
7580 rtx base = cfun->machine->base_reg;
7581 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7582 UNSPEC_LTREF);
7584 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7585 return;
7589 /* ... or a load-address type pattern. */
7590 if (GET_CODE (*x) == SET)
7592 rtx addrref = SET_SRC (*x);
7594 if (GET_CODE (addrref) == SYMBOL_REF
7595 && CONSTANT_POOL_ADDRESS_P (addrref))
7597 rtx base = cfun->machine->base_reg;
7598 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7599 UNSPEC_LTREF);
7601 SET_SRC (*x) = addr;
7602 return;
7605 if (GET_CODE (addrref) == CONST
7606 && GET_CODE (XEXP (addrref, 0)) == PLUS
7607 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7608 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7609 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7611 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7612 rtx sym = XEXP (XEXP (addrref, 0), 0);
7613 rtx base = cfun->machine->base_reg;
7614 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7615 UNSPEC_LTREF);
7617 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7618 return;
7622 /* Annotate LTREL_BASE as well. */
7623 if (GET_CODE (*x) == UNSPEC
7624 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7626 rtx base = cfun->machine->base_reg;
7627 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7628 UNSPEC_LTREL_BASE);
7629 return;
7632 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7633 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7635 if (fmt[i] == 'e')
7637 annotate_constant_pool_refs (&XEXP (*x, i));
7639 else if (fmt[i] == 'E')
7641 for (j = 0; j < XVECLEN (*x, i); j++)
7642 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7647 /* Split all branches that exceed the maximum distance.
7648 Returns true if this created a new literal pool entry. */
7650 static int
7651 s390_split_branches (void)
7653 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7654 int new_literal = 0, ret;
7655 rtx_insn *insn;
7656 rtx pat, target;
7657 rtx *label;
7659 /* We need correct insn addresses. */
7661 shorten_branches (get_insns ());
7663 /* Find all branches that exceed 64KB, and split them. */
7665 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7667 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7668 continue;
7670 pat = PATTERN (insn);
7671 if (GET_CODE (pat) == PARALLEL)
7672 pat = XVECEXP (pat, 0, 0);
7673 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7674 continue;
7676 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7678 label = &SET_SRC (pat);
7680 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7682 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7683 label = &XEXP (SET_SRC (pat), 1);
7684 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7685 label = &XEXP (SET_SRC (pat), 2);
7686 else
7687 continue;
7689 else
7690 continue;
7692 if (get_attr_length (insn) <= 4)
7693 continue;
7695 /* We are going to use the return register as scratch register,
7696 make sure it will be saved/restored by the prologue/epilogue. */
7697 cfun_frame_layout.save_return_addr_p = 1;
7699 if (!flag_pic)
7701 new_literal = 1;
7702 rtx mem = force_const_mem (Pmode, *label);
7703 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7704 insn);
7705 INSN_ADDRESSES_NEW (set_insn, -1);
7706 annotate_constant_pool_refs (&PATTERN (set_insn));
7708 target = temp_reg;
7710 else
7712 new_literal = 1;
7713 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7714 UNSPEC_LTREL_OFFSET);
7715 target = gen_rtx_CONST (Pmode, target);
7716 target = force_const_mem (Pmode, target);
7717 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7718 insn);
7719 INSN_ADDRESSES_NEW (set_insn, -1);
7720 annotate_constant_pool_refs (&PATTERN (set_insn));
7722 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7723 cfun->machine->base_reg),
7724 UNSPEC_LTREL_BASE);
7725 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7728 ret = validate_change (insn, label, target, 0);
7729 gcc_assert (ret);
7732 return new_literal;
7736 /* Find an annotated literal pool symbol referenced in RTX X,
7737 and store it at REF. Will abort if X contains references to
7738 more than one such pool symbol; multiple references to the same
7739 symbol are allowed, however.
7741 The rtx pointed to by REF must be initialized to NULL_RTX
7742 by the caller before calling this routine. */
7744 static void
7745 find_constant_pool_ref (rtx x, rtx *ref)
7747 int i, j;
7748 const char *fmt;
7750 /* Ignore LTREL_BASE references. */
7751 if (GET_CODE (x) == UNSPEC
7752 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7753 return;
7754 /* Likewise POOL_ENTRY insns. */
7755 if (GET_CODE (x) == UNSPEC_VOLATILE
7756 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7757 return;
7759 gcc_assert (GET_CODE (x) != SYMBOL_REF
7760 || !CONSTANT_POOL_ADDRESS_P (x));
7762 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7764 rtx sym = XVECEXP (x, 0, 0);
7765 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7766 && CONSTANT_POOL_ADDRESS_P (sym));
7768 if (*ref == NULL_RTX)
7769 *ref = sym;
7770 else
7771 gcc_assert (*ref == sym);
7773 return;
7776 fmt = GET_RTX_FORMAT (GET_CODE (x));
7777 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7779 if (fmt[i] == 'e')
7781 find_constant_pool_ref (XEXP (x, i), ref);
7783 else if (fmt[i] == 'E')
7785 for (j = 0; j < XVECLEN (x, i); j++)
7786 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7791 /* Replace every reference to the annotated literal pool
7792 symbol REF in X by its base plus OFFSET. */
7794 static void
7795 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7797 int i, j;
7798 const char *fmt;
7800 gcc_assert (*x != ref);
7802 if (GET_CODE (*x) == UNSPEC
7803 && XINT (*x, 1) == UNSPEC_LTREF
7804 && XVECEXP (*x, 0, 0) == ref)
7806 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7807 return;
7810 if (GET_CODE (*x) == PLUS
7811 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7812 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7813 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7814 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7816 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7817 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7818 return;
7821 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7822 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7824 if (fmt[i] == 'e')
7826 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7828 else if (fmt[i] == 'E')
7830 for (j = 0; j < XVECLEN (*x, i); j++)
7831 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
7836 /* Check whether X contains an UNSPEC_LTREL_BASE.
7837 Return its constant pool symbol if found, NULL_RTX otherwise. */
7839 static rtx
7840 find_ltrel_base (rtx x)
7842 int i, j;
7843 const char *fmt;
7845 if (GET_CODE (x) == UNSPEC
7846 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7847 return XVECEXP (x, 0, 0);
7849 fmt = GET_RTX_FORMAT (GET_CODE (x));
7850 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7852 if (fmt[i] == 'e')
7854 rtx fnd = find_ltrel_base (XEXP (x, i));
7855 if (fnd)
7856 return fnd;
7858 else if (fmt[i] == 'E')
7860 for (j = 0; j < XVECLEN (x, i); j++)
7862 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
7863 if (fnd)
7864 return fnd;
7869 return NULL_RTX;
7872 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
7874 static void
7875 replace_ltrel_base (rtx *x)
7877 int i, j;
7878 const char *fmt;
7880 if (GET_CODE (*x) == UNSPEC
7881 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7883 *x = XVECEXP (*x, 0, 1);
7884 return;
7887 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7888 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7890 if (fmt[i] == 'e')
7892 replace_ltrel_base (&XEXP (*x, i));
7894 else if (fmt[i] == 'E')
7896 for (j = 0; j < XVECLEN (*x, i); j++)
7897 replace_ltrel_base (&XVECEXP (*x, i, j));
7903 /* We keep a list of constants which we have to add to internal
7904 constant tables in the middle of large functions. */
7906 #define NR_C_MODES 32
7907 machine_mode constant_modes[NR_C_MODES] =
7909 TFmode, TImode, TDmode,
7910 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
7911 V4SFmode, V2DFmode, V1TFmode,
7912 DFmode, DImode, DDmode,
7913 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
7914 SFmode, SImode, SDmode,
7915 V4QImode, V2HImode, V1SImode, V1SFmode,
7916 HImode,
7917 V2QImode, V1HImode,
7918 QImode,
7919 V1QImode
7922 struct constant
7924 struct constant *next;
7925 rtx value;
7926 rtx_code_label *label;
7929 struct constant_pool
7931 struct constant_pool *next;
7932 rtx_insn *first_insn;
7933 rtx_insn *pool_insn;
7934 bitmap insns;
7935 rtx_insn *emit_pool_after;
7937 struct constant *constants[NR_C_MODES];
7938 struct constant *execute;
7939 rtx_code_label *label;
7940 int size;
7943 /* Allocate new constant_pool structure. */
7945 static struct constant_pool *
7946 s390_alloc_pool (void)
7948 struct constant_pool *pool;
7949 int i;
7951 pool = (struct constant_pool *) xmalloc (sizeof *pool);
7952 pool->next = NULL;
7953 for (i = 0; i < NR_C_MODES; i++)
7954 pool->constants[i] = NULL;
7956 pool->execute = NULL;
7957 pool->label = gen_label_rtx ();
7958 pool->first_insn = NULL;
7959 pool->pool_insn = NULL;
7960 pool->insns = BITMAP_ALLOC (NULL);
7961 pool->size = 0;
7962 pool->emit_pool_after = NULL;
7964 return pool;
7967 /* Create new constant pool covering instructions starting at INSN
7968 and chain it to the end of POOL_LIST. */
7970 static struct constant_pool *
7971 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
7973 struct constant_pool *pool, **prev;
7975 pool = s390_alloc_pool ();
7976 pool->first_insn = insn;
7978 for (prev = pool_list; *prev; prev = &(*prev)->next)
7980 *prev = pool;
7982 return pool;
7985 /* End range of instructions covered by POOL at INSN and emit
7986 placeholder insn representing the pool. */
7988 static void
7989 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
7991 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
7993 if (!insn)
7994 insn = get_last_insn ();
7996 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
7997 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8000 /* Add INSN to the list of insns covered by POOL. */
8002 static void
8003 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8005 bitmap_set_bit (pool->insns, INSN_UID (insn));
8008 /* Return pool out of POOL_LIST that covers INSN. */
8010 static struct constant_pool *
8011 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8013 struct constant_pool *pool;
8015 for (pool = pool_list; pool; pool = pool->next)
8016 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8017 break;
8019 return pool;
8022 /* Add constant VAL of mode MODE to the constant pool POOL. */
8024 static void
8025 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8027 struct constant *c;
8028 int i;
8030 for (i = 0; i < NR_C_MODES; i++)
8031 if (constant_modes[i] == mode)
8032 break;
8033 gcc_assert (i != NR_C_MODES);
8035 for (c = pool->constants[i]; c != NULL; c = c->next)
8036 if (rtx_equal_p (val, c->value))
8037 break;
8039 if (c == NULL)
8041 c = (struct constant *) xmalloc (sizeof *c);
8042 c->value = val;
8043 c->label = gen_label_rtx ();
8044 c->next = pool->constants[i];
8045 pool->constants[i] = c;
8046 pool->size += GET_MODE_SIZE (mode);
8050 /* Return an rtx that represents the offset of X from the start of
8051 pool POOL. */
8053 static rtx
8054 s390_pool_offset (struct constant_pool *pool, rtx x)
8056 rtx label;
8058 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8059 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8060 UNSPEC_POOL_OFFSET);
8061 return gen_rtx_CONST (GET_MODE (x), x);
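/* Sketch of the result (for illustration only): for a pool entry X the
   returned expression has roughly the shape

     (const (unspec [X (label_ref pool_label)] UNSPEC_POOL_OFFSET))

   i.e. a symbolic "X minus start of pool" value which is only resolved to a
   concrete byte offset once the pool is actually output.  */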
8064 /* Find constant VAL of mode MODE in the constant pool POOL.
8065 Return an RTX describing the distance from the start of
8066 the pool to the location of the new constant. */
8068 static rtx
8069 s390_find_constant (struct constant_pool *pool, rtx val,
8070 machine_mode mode)
8072 struct constant *c;
8073 int i;
8075 for (i = 0; i < NR_C_MODES; i++)
8076 if (constant_modes[i] == mode)
8077 break;
8078 gcc_assert (i != NR_C_MODES);
8080 for (c = pool->constants[i]; c != NULL; c = c->next)
8081 if (rtx_equal_p (val, c->value))
8082 break;
8084 gcc_assert (c);
8086 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8089 /* Check whether INSN is an execute. Return the label_ref to its
8090 execute target template if so, NULL_RTX otherwise. */
8092 static rtx
8093 s390_execute_label (rtx insn)
8095 if (NONJUMP_INSN_P (insn)
8096 && GET_CODE (PATTERN (insn)) == PARALLEL
8097 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8098 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8099 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8101 return NULL_RTX;
8104 /* Add execute target for INSN to the constant pool POOL. */
8106 static void
8107 s390_add_execute (struct constant_pool *pool, rtx insn)
8109 struct constant *c;
8111 for (c = pool->execute; c != NULL; c = c->next)
8112 if (INSN_UID (insn) == INSN_UID (c->value))
8113 break;
8115 if (c == NULL)
8117 c = (struct constant *) xmalloc (sizeof *c);
8118 c->value = insn;
8119 c->label = gen_label_rtx ();
8120 c->next = pool->execute;
8121 pool->execute = c;
8122 pool->size += 6;
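/* The 6 bytes reserved per execute target presumably correspond to the
   maximum length of a single S/390 instruction (2, 4 or 6 bytes), since each
   in-pool execute template amounts to one target instruction.  */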
8126 /* Find execute target for INSN in the constant pool POOL.
8127 Return an RTX describing the distance from the start of
8128 the pool to the location of the execute target. */
8130 static rtx
8131 s390_find_execute (struct constant_pool *pool, rtx insn)
8133 struct constant *c;
8135 for (c = pool->execute; c != NULL; c = c->next)
8136 if (INSN_UID (insn) == INSN_UID (c->value))
8137 break;
8139 gcc_assert (c);
8141 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8144 /* For an execute INSN, extract the execute target template. */
8146 static rtx
8147 s390_execute_target (rtx insn)
8149 rtx pattern = PATTERN (insn);
8150 gcc_assert (s390_execute_label (insn));
8152 if (XVECLEN (pattern, 0) == 2)
8154 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8156 else
8158 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8159 int i;
8161 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8162 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8164 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8167 return pattern;
8170 /* Indicate that INSN cannot be duplicated. This is the case for
8171 execute insns that carry a unique label. */
8173 static bool
8174 s390_cannot_copy_insn_p (rtx_insn *insn)
8176 rtx label = s390_execute_label (insn);
8177 return label && label != const0_rtx;
8180 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8181 do not emit the pool base label. */
8183 static void
8184 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8186 struct constant *c;
8187 rtx_insn *insn = pool->pool_insn;
8188 int i;
8190 /* Switch to rodata section. */
8191 if (TARGET_CPU_ZARCH)
8193 insn = emit_insn_after (gen_pool_section_start (), insn);
8194 INSN_ADDRESSES_NEW (insn, -1);
8197 /* Ensure minimum pool alignment. */
8198 if (TARGET_CPU_ZARCH)
8199 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8200 else
8201 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8202 INSN_ADDRESSES_NEW (insn, -1);
8204 /* Emit pool base label. */
8205 if (!remote_label)
8207 insn = emit_label_after (pool->label, insn);
8208 INSN_ADDRESSES_NEW (insn, -1);
8211 /* Dump constants in descending alignment requirement order,
8212 ensuring proper alignment for every constant. */
8213 for (i = 0; i < NR_C_MODES; i++)
8214 for (c = pool->constants[i]; c; c = c->next)
8216 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8217 rtx value = copy_rtx (c->value);
8218 if (GET_CODE (value) == CONST
8219 && GET_CODE (XEXP (value, 0)) == UNSPEC
8220 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8221 && XVECLEN (XEXP (value, 0), 0) == 1)
8222 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8224 insn = emit_label_after (c->label, insn);
8225 INSN_ADDRESSES_NEW (insn, -1);
8227 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8228 gen_rtvec (1, value),
8229 UNSPECV_POOL_ENTRY);
8230 insn = emit_insn_after (value, insn);
8231 INSN_ADDRESSES_NEW (insn, -1);
8234 /* Ensure minimum alignment for instructions. */
8235 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8236 INSN_ADDRESSES_NEW (insn, -1);
8238 /* Output in-pool execute template insns. */
8239 for (c = pool->execute; c; c = c->next)
8241 insn = emit_label_after (c->label, insn);
8242 INSN_ADDRESSES_NEW (insn, -1);
8244 insn = emit_insn_after (s390_execute_target (c->value), insn);
8245 INSN_ADDRESSES_NEW (insn, -1);
8248 /* Switch back to previous section. */
8249 if (TARGET_CPU_ZARCH)
8251 insn = emit_insn_after (gen_pool_section_end (), insn);
8252 INSN_ADDRESSES_NEW (insn, -1);
8255 insn = emit_barrier_after (insn);
8256 INSN_ADDRESSES_NEW (insn, -1);
8258 /* Remove placeholder insn. */
8259 remove_insn (pool->pool_insn);
8262 /* Free all memory used by POOL. */
8264 static void
8265 s390_free_pool (struct constant_pool *pool)
8267 struct constant *c, *next;
8268 int i;
8270 for (i = 0; i < NR_C_MODES; i++)
8271 for (c = pool->constants[i]; c; c = next)
8273 next = c->next;
8274 free (c);
8277 for (c = pool->execute; c; c = next)
8279 next = c->next;
8280 free (c);
8283 BITMAP_FREE (pool->insns);
8284 free (pool);
8288 /* Collect main literal pool. Return NULL on overflow. */
8290 static struct constant_pool *
8291 s390_mainpool_start (void)
8293 struct constant_pool *pool;
8294 rtx_insn *insn;
8296 pool = s390_alloc_pool ();
8298 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8300 if (NONJUMP_INSN_P (insn)
8301 && GET_CODE (PATTERN (insn)) == SET
8302 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8303 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8305 /* There might be two main_pool instructions if base_reg
8306 is call-clobbered; one for shrink-wrapped code and one
8307 for the rest. We want to keep the first. */
8308 if (pool->pool_insn)
8309 {
8310 insn = PREV_INSN (insn);
8311 delete_insn (NEXT_INSN (insn));
8312 continue;
8313 }
8314 pool->pool_insn = insn;
8317 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8319 s390_add_execute (pool, insn);
8321 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8323 rtx pool_ref = NULL_RTX;
8324 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8325 if (pool_ref)
8327 rtx constant = get_pool_constant (pool_ref);
8328 machine_mode mode = get_pool_mode (pool_ref);
8329 s390_add_constant (pool, constant, mode);
8333 /* If hot/cold partitioning is enabled we have to make sure that
8334 the literal pool is emitted in the same section where the
8335 initialization of the literal pool base pointer takes place.
8336 emit_pool_after is only used in the non-overflow case on
8337 non-Z CPUs, where we can emit the literal pool at the end of the
8338 function body within the text section. */
8339 if (NOTE_P (insn)
8340 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8341 && !pool->emit_pool_after)
8342 pool->emit_pool_after = PREV_INSN (insn);
8345 gcc_assert (pool->pool_insn || pool->size == 0);
8347 if (pool->size >= 4096)
8349 /* We're going to chunkify the pool, so remove the main
8350 pool placeholder insn. */
8351 remove_insn (pool->pool_insn);
8353 s390_free_pool (pool);
8354 pool = NULL;
8357 /* If the function ends with the section where the literal pool
8358 should be emitted, set the marker to its end. */
8359 if (pool && !pool->emit_pool_after)
8360 pool->emit_pool_after = get_last_insn ();
8362 return pool;
8365 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8366 Modify the current function to output the pool constants as well as
8367 the pool register setup instruction. */
8369 static void
8370 s390_mainpool_finish (struct constant_pool *pool)
8372 rtx base_reg = cfun->machine->base_reg;
8374 /* If the pool is empty, we're done. */
8375 if (pool->size == 0)
8377 /* We don't actually need a base register after all. */
8378 cfun->machine->base_reg = NULL_RTX;
8380 if (pool->pool_insn)
8381 remove_insn (pool->pool_insn);
8382 s390_free_pool (pool);
8383 return;
8386 /* We need correct insn addresses. */
8387 shorten_branches (get_insns ());
8389 /* On zSeries, we use a LARL to load the pool register. The pool is
8390 located in the .rodata section, so we emit it after the function. */
8391 if (TARGET_CPU_ZARCH)
8393 rtx set = gen_main_base_64 (base_reg, pool->label);
8394 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8395 INSN_ADDRESSES_NEW (insn, -1);
8396 remove_insn (pool->pool_insn);
8398 insn = get_last_insn ();
8399 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8400 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8402 s390_dump_pool (pool, 0);
8405 /* On S/390, if the total size of the function's code plus literal pool
8406 does not exceed 4096 bytes, we use BASR to set up a function base
8407 pointer, and emit the literal pool at the end of the function. */
8408 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8409 + pool->size + 8 /* alignment slop */ < 4096)
8411 rtx set = gen_main_base_31_small (base_reg, pool->label);
8412 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8413 INSN_ADDRESSES_NEW (insn, -1);
8414 remove_insn (pool->pool_insn);
8416 insn = emit_label_after (pool->label, insn);
8417 INSN_ADDRESSES_NEW (insn, -1);
8419 /* emit_pool_after will be set by s390_mainpool_start to the
8420 last insn of the section where the literal pool should be
8421 emitted. */
8422 insn = pool->emit_pool_after;
8424 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8425 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8427 s390_dump_pool (pool, 1);
8430 /* Otherwise, we emit an inline literal pool and use BASR to branch
8431 over it, setting up the pool register at the same time. */
8432 else
8434 rtx_code_label *pool_end = gen_label_rtx ();
8436 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8437 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8438 JUMP_LABEL (insn) = pool_end;
8439 INSN_ADDRESSES_NEW (insn, -1);
8440 remove_insn (pool->pool_insn);
8442 insn = emit_label_after (pool->label, insn);
8443 INSN_ADDRESSES_NEW (insn, -1);
8445 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8446 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8448 insn = emit_label_after (pool_end, pool->pool_insn);
8449 INSN_ADDRESSES_NEW (insn, -1);
8451 s390_dump_pool (pool, 1);
8455 /* Replace all literal pool references. */
8457 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8459 if (INSN_P (insn))
8460 replace_ltrel_base (&PATTERN (insn));
8462 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8464 rtx addr, pool_ref = NULL_RTX;
8465 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8466 if (pool_ref)
8468 if (s390_execute_label (insn))
8469 addr = s390_find_execute (pool, insn);
8470 else
8471 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8472 get_pool_mode (pool_ref));
8474 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8475 INSN_CODE (insn) = -1;
8481 /* Free the pool. */
8482 s390_free_pool (pool);
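/* Rough summary of the three code paths above (an illustration, not a
   specification): on z/Architecture the pool goes to .rodata and is addressed
   via LARL; on 31-bit ESA the pool is placed at the end of the function when
   everything fits below 4 KB, otherwise it is emitted inline behind a branch.
   For example, if the insn at emit_pool_after has address 3000 and the pool
   holds 900 bytes of constants, the check 3000 + 900 + 8 = 3908 < 4096
   succeeds and the small BASR scheme is used; with 3300 + 900 + 8 = 4208 the
   inline-pool fallback is taken instead.  */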
8485 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8486 We have decided we cannot use this pool, so revert all changes
8487 to the current function that were done by s390_mainpool_start. */
8488 static void
8489 s390_mainpool_cancel (struct constant_pool *pool)
8491 /* We didn't actually change the instruction stream, so simply
8492 free the pool memory. */
8493 s390_free_pool (pool);
8497 /* Chunkify the literal pool. */
8499 #define S390_POOL_CHUNK_MIN 0xc00
8500 #define S390_POOL_CHUNK_MAX 0xe00
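/* 0xc00 is 3072 bytes and 0xe00 is 3584 bytes; both chunk limits stay well
   below the 4096-byte displacement range, which appears to leave headroom for
   the base-register reload insns accounted for via extra_size below.  */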
8502 static struct constant_pool *
8503 s390_chunkify_start (void)
8505 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8506 int extra_size = 0;
8507 bitmap far_labels;
8508 rtx pending_ltrel = NULL_RTX;
8509 rtx_insn *insn;
8511 rtx (*gen_reload_base) (rtx, rtx) =
8512 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8515 /* We need correct insn addresses. */
8517 shorten_branches (get_insns ());
8519 /* Scan all insns and move literals to pool chunks. */
8521 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8523 bool section_switch_p = false;
8525 /* Check for pending LTREL_BASE. */
8526 if (INSN_P (insn))
8528 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8529 if (ltrel_base)
8531 gcc_assert (ltrel_base == pending_ltrel);
8532 pending_ltrel = NULL_RTX;
8536 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8538 if (!curr_pool)
8539 curr_pool = s390_start_pool (&pool_list, insn);
8541 s390_add_execute (curr_pool, insn);
8542 s390_add_pool_insn (curr_pool, insn);
8544 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8546 rtx pool_ref = NULL_RTX;
8547 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8548 if (pool_ref)
8550 rtx constant = get_pool_constant (pool_ref);
8551 machine_mode mode = get_pool_mode (pool_ref);
8553 if (!curr_pool)
8554 curr_pool = s390_start_pool (&pool_list, insn);
8556 s390_add_constant (curr_pool, constant, mode);
8557 s390_add_pool_insn (curr_pool, insn);
8559 /* Don't split the pool chunk between a LTREL_OFFSET load
8560 and the corresponding LTREL_BASE. */
8561 if (GET_CODE (constant) == CONST
8562 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8563 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8565 gcc_assert (!pending_ltrel);
8566 pending_ltrel = pool_ref;
8571 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8573 if (curr_pool)
8574 s390_add_pool_insn (curr_pool, insn);
8575 /* An LTREL_BASE must follow within the same basic block. */
8576 gcc_assert (!pending_ltrel);
8579 if (NOTE_P (insn))
8580 switch (NOTE_KIND (insn))
8582 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8583 section_switch_p = true;
8584 break;
8585 case NOTE_INSN_VAR_LOCATION:
8586 case NOTE_INSN_CALL_ARG_LOCATION:
8587 continue;
8588 default:
8589 break;
8592 if (!curr_pool
8593 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8594 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8595 continue;
8597 if (TARGET_CPU_ZARCH)
8599 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8600 continue;
8602 s390_end_pool (curr_pool, NULL);
8603 curr_pool = NULL;
8605 else
8607 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8608 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8609 + extra_size;
8611 /* We will later have to insert base register reload insns.
8612 Those will have an effect on code size, which we need to
8613 consider here. This calculation makes rather pessimistic
8614 worst-case assumptions. */
8615 if (LABEL_P (insn))
8616 extra_size += 6;
8618 if (chunk_size < S390_POOL_CHUNK_MIN
8619 && curr_pool->size < S390_POOL_CHUNK_MIN
8620 && !section_switch_p)
8621 continue;
8623 /* Pool chunks can only be inserted after BARRIERs ... */
8624 if (BARRIER_P (insn))
8626 s390_end_pool (curr_pool, insn);
8627 curr_pool = NULL;
8628 extra_size = 0;
8631 /* ... so if we don't find one in time, create one. */
8632 else if (chunk_size > S390_POOL_CHUNK_MAX
8633 || curr_pool->size > S390_POOL_CHUNK_MAX
8634 || section_switch_p)
8636 rtx_insn *label, *jump, *barrier, *next, *prev;
8638 if (!section_switch_p)
8640 /* We can insert the barrier only after a 'real' insn. */
8641 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8642 continue;
8643 if (get_attr_length (insn) == 0)
8644 continue;
8645 /* Don't separate LTREL_BASE from the corresponding
8646 LTREL_OFFSET load. */
8647 if (pending_ltrel)
8648 continue;
8649 next = insn;
8650 do
8651 {
8652 insn = next;
8653 next = NEXT_INSN (insn);
8654 }
8655 while (next
8656 && NOTE_P (next)
8657 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8658 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8660 else
8662 gcc_assert (!pending_ltrel);
8664 /* The old pool has to end before the section switch
8665 note in order to make it part of the current
8666 section. */
8667 insn = PREV_INSN (insn);
8670 label = gen_label_rtx ();
8671 prev = insn;
8672 if (prev && NOTE_P (prev))
8673 prev = prev_nonnote_insn (prev);
8674 if (prev)
8675 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8676 INSN_LOCATION (prev));
8677 else
8678 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8679 barrier = emit_barrier_after (jump);
8680 insn = emit_label_after (label, barrier);
8681 JUMP_LABEL (jump) = label;
8682 LABEL_NUSES (label) = 1;
8684 INSN_ADDRESSES_NEW (jump, -1);
8685 INSN_ADDRESSES_NEW (barrier, -1);
8686 INSN_ADDRESSES_NEW (insn, -1);
8688 s390_end_pool (curr_pool, barrier);
8689 curr_pool = NULL;
8690 extra_size = 0;
8695 if (curr_pool)
8696 s390_end_pool (curr_pool, NULL);
8697 gcc_assert (!pending_ltrel);
8699 /* Find all labels that are branched into
8700 from an insn belonging to a different chunk. */
8702 far_labels = BITMAP_ALLOC (NULL);
8704 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8706 rtx_jump_table_data *table;
8708 /* Labels marked with LABEL_PRESERVE_P can be target
8709 of non-local jumps, so we have to mark them.
8710 The same holds for named labels.
8712 Don't do that, however, if it is the label before
8713 a jump table. */
8715 if (LABEL_P (insn)
8716 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8718 rtx_insn *vec_insn = NEXT_INSN (insn);
8719 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8720 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8722 /* Check potential targets in a table jump (casesi_jump). */
8723 else if (tablejump_p (insn, NULL, &table))
8725 rtx vec_pat = PATTERN (table);
8726 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8728 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8730 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8732 if (s390_find_pool (pool_list, label)
8733 != s390_find_pool (pool_list, insn))
8734 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8737 /* If we have a direct jump (conditional or unconditional),
8738 check all potential targets. */
8739 else if (JUMP_P (insn))
8741 rtx pat = PATTERN (insn);
8743 if (GET_CODE (pat) == PARALLEL)
8744 pat = XVECEXP (pat, 0, 0);
8746 if (GET_CODE (pat) == SET)
8748 rtx label = JUMP_LABEL (insn);
8749 if (label && !ANY_RETURN_P (label))
8751 if (s390_find_pool (pool_list, label)
8752 != s390_find_pool (pool_list, insn))
8753 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8759 /* Insert base register reload insns before every pool. */
8761 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8763 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8764 curr_pool->label);
8765 rtx_insn *insn = curr_pool->first_insn;
8766 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8769 /* Insert base register reload insns at every far label. */
8771 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8772 if (LABEL_P (insn)
8773 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8775 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8776 if (pool)
8778 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8779 pool->label);
8780 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8785 BITMAP_FREE (far_labels);
8788 /* Recompute insn addresses. */
8790 init_insn_lengths ();
8791 shorten_branches (get_insns ());
8793 return pool_list;
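/* Illustrative walk-through (assumed numbers, not derived from a real insn
   stream): a function whose literals amount to roughly 7 KB would typically
   end up with two or three chunks here, each ended either at a convenient
   BARRIER or at an artificial jump/label pair created above, and each
   preceded by a base-register reload so that every literal stays within
   displacement range of its own chunk's pool label.  */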
8796 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8797 After we have decided to use this list, finish implementing
8798 all changes to the current function as required. */
8800 static void
8801 s390_chunkify_finish (struct constant_pool *pool_list)
8803 struct constant_pool *curr_pool = NULL;
8804 rtx_insn *insn;
8807 /* Replace all literal pool references. */
8809 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8811 if (INSN_P (insn))
8812 replace_ltrel_base (&PATTERN (insn));
8814 curr_pool = s390_find_pool (pool_list, insn);
8815 if (!curr_pool)
8816 continue;
8818 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8820 rtx addr, pool_ref = NULL_RTX;
8821 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8822 if (pool_ref)
8824 if (s390_execute_label (insn))
8825 addr = s390_find_execute (curr_pool, insn);
8826 else
8827 addr = s390_find_constant (curr_pool,
8828 get_pool_constant (pool_ref),
8829 get_pool_mode (pool_ref));
8831 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8832 INSN_CODE (insn) = -1;
8837 /* Dump out all literal pools. */
8839 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8840 s390_dump_pool (curr_pool, 0);
8842 /* Free pool list. */
8844 while (pool_list)
8846 struct constant_pool *next = pool_list->next;
8847 s390_free_pool (pool_list);
8848 pool_list = next;
8852 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8853 We have decided we cannot use this list, so revert all changes
8854 to the current function that were done by s390_chunkify_start. */
8856 static void
8857 s390_chunkify_cancel (struct constant_pool *pool_list)
8859 struct constant_pool *curr_pool = NULL;
8860 rtx_insn *insn;
8862 /* Remove all pool placeholder insns. */
8864 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8866 /* Did we insert an extra barrier? Remove it. */
8867 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
8868 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
8869 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
8871 if (jump && JUMP_P (jump)
8872 && barrier && BARRIER_P (barrier)
8873 && label && LABEL_P (label)
8874 && GET_CODE (PATTERN (jump)) == SET
8875 && SET_DEST (PATTERN (jump)) == pc_rtx
8876 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
8877 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
8879 remove_insn (jump);
8880 remove_insn (barrier);
8881 remove_insn (label);
8884 remove_insn (curr_pool->pool_insn);
8887 /* Remove all base register reload insns. */
8889 for (insn = get_insns (); insn; )
8891 rtx_insn *next_insn = NEXT_INSN (insn);
8893 if (NONJUMP_INSN_P (insn)
8894 && GET_CODE (PATTERN (insn)) == SET
8895 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
8896 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
8897 remove_insn (insn);
8899 insn = next_insn;
8902 /* Free pool list. */
8904 while (pool_list)
8906 struct constant_pool *next = pool_list->next;
8907 s390_free_pool (pool_list);
8908 pool_list = next;
8912 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
8914 void
8915 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
8917 switch (GET_MODE_CLASS (mode))
8919 case MODE_FLOAT:
8920 case MODE_DECIMAL_FLOAT:
8921 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
8923 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
8924 break;
8926 case MODE_INT:
8927 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
8928 mark_symbol_refs_as_used (exp);
8929 break;
8931 case MODE_VECTOR_INT:
8932 case MODE_VECTOR_FLOAT:
8934 int i;
8935 machine_mode inner_mode;
8936 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
8938 inner_mode = GET_MODE_INNER (GET_MODE (exp));
8939 for (i = 0; i < XVECLEN (exp, 0); i++)
8940 s390_output_pool_entry (XVECEXP (exp, 0, i),
8941 inner_mode,
8942 i == 0
8943 ? align
8944 : GET_MODE_BITSIZE (inner_mode));
8946 break;
8948 default:
8949 gcc_unreachable ();
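/* Example of the vector case (a sketch, assuming a V4SImode entry): the
   CONST_VECTOR is emitted as four consecutive SImode integers; the first
   element is emitted with the alignment passed in for the whole entry, the
   remaining ones with 32-bit (GET_MODE_BITSIZE (SImode)) alignment.  */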
8954 /* Return an RTL expression representing the value of the return address
8955 for the frame COUNT steps up from the current frame. FRAME is the
8956 frame pointer of that frame. */
8959 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
8961 int offset;
8962 rtx addr;
8964 /* Without backchain, we fail for all but the current frame. */
8966 if (!TARGET_BACKCHAIN && count > 0)
8967 return NULL_RTX;
8969 /* For the current frame, we need to make sure the initial
8970 value of RETURN_REGNUM is actually saved. */
8972 if (count == 0)
8974 /* On non-z architectures branch splitting could overwrite r14. */
8975 if (TARGET_CPU_ZARCH)
8976 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
8977 else
8979 cfun_frame_layout.save_return_addr_p = true;
8980 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
8984 if (TARGET_PACKED_STACK)
8985 offset = -2 * UNITS_PER_LONG;
8986 else
8987 offset = RETURN_REGNUM * UNITS_PER_LONG;
8989 addr = plus_constant (Pmode, frame, offset);
8990 addr = memory_address (Pmode, addr);
8991 return gen_rtx_MEM (Pmode, addr);
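/* Worked example (standard, non-packed stack layout assumed): with
   RETURN_REGNUM == 14 and UNITS_PER_LONG == 8 on 64-bit, the return address
   of a parent frame is loaded from FRAME + 112; on 31-bit the slot sits at
   FRAME + 56.  With TARGET_PACKED_STACK the slot is at -2 * UNITS_PER_LONG
   instead.  */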
8994 /* Return an RTL expression representing the back chain stored in
8995 the current stack frame. */
8998 s390_back_chain_rtx (void)
9000 rtx chain;
9002 gcc_assert (TARGET_BACKCHAIN);
9004 if (TARGET_PACKED_STACK)
9005 chain = plus_constant (Pmode, stack_pointer_rtx,
9006 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9007 else
9008 chain = stack_pointer_rtx;
9010 chain = gen_rtx_MEM (Pmode, chain);
9011 return chain;
9014 /* Find first call clobbered register unused in a function.
9015 This could be used as base register in a leaf function
9016 or for holding the return address before epilogue. */
9018 static int
9019 find_unused_clobbered_reg (void)
9021 int i;
9022 for (i = 0; i < 6; i++)
9023 if (!df_regs_ever_live_p (i))
9024 return i;
9025 return 0;
9029 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9030 clobbered hard regs in SETREG. */
9032 static void
9033 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9035 char *regs_ever_clobbered = (char *)data;
9036 unsigned int i, regno;
9037 machine_mode mode = GET_MODE (setreg);
9039 if (GET_CODE (setreg) == SUBREG)
9041 rtx inner = SUBREG_REG (setreg);
9042 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9043 return;
9044 regno = subreg_regno (setreg);
9046 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9047 regno = REGNO (setreg);
9048 else
9049 return;
9051 for (i = regno;
9052 i < regno + HARD_REGNO_NREGS (regno, mode);
9053 i++)
9054 regs_ever_clobbered[i] = 1;
9057 /* Walks through all basic blocks of the current function looking
9058 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9059 of the passed integer array REGS_EVER_CLOBBERED are set to one for
9060 each of those regs. */
9062 static void
9063 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9065 basic_block cur_bb;
9066 rtx_insn *cur_insn;
9067 unsigned int i;
9069 memset (regs_ever_clobbered, 0, 32);
9071 /* For non-leaf functions we have to consider all call clobbered regs to be
9072 clobbered. */
9073 if (!crtl->is_leaf)
9075 for (i = 0; i < 32; i++)
9076 regs_ever_clobbered[i] = call_really_used_regs[i];
9079 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9080 this work is done by liveness analysis (mark_regs_live_at_end).
9081 Special care is needed for functions containing landing pads. Landing pads
9082 may use the eh registers, but the code which sets these registers is not
9083 contained in that function. Hence s390_regs_ever_clobbered is not able to
9084 deal with this automatically. */
9085 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9086 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9087 if (crtl->calls_eh_return
9088 || (cfun->machine->has_landing_pad_p
9089 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9090 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9092 /* For nonlocal gotos all call-saved registers have to be saved.
9093 This flag is also set for the unwinding code in libgcc.
9094 See expand_builtin_unwind_init. For regs_ever_live this is done by
9095 reload. */
9096 if (crtl->saves_all_registers)
9097 for (i = 0; i < 32; i++)
9098 if (!call_really_used_regs[i])
9099 regs_ever_clobbered[i] = 1;
9101 FOR_EACH_BB_FN (cur_bb, cfun)
9103 FOR_BB_INSNS (cur_bb, cur_insn)
9105 rtx pat;
9107 if (!INSN_P (cur_insn))
9108 continue;
9110 pat = PATTERN (cur_insn);
9112 /* Ignore GPR restore insns. */
9113 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9115 if (GET_CODE (pat) == SET
9116 && GENERAL_REG_P (SET_DEST (pat)))
9118 /* lgdr */
9119 if (GET_MODE (SET_SRC (pat)) == DImode
9120 && FP_REG_P (SET_SRC (pat)))
9121 continue;
9123 /* l / lg */
9124 if (GET_CODE (SET_SRC (pat)) == MEM)
9125 continue;
9128 /* lm / lmg */
9129 if (GET_CODE (pat) == PARALLEL
9130 && load_multiple_operation (pat, VOIDmode))
9131 continue;
9134 note_stores (pat,
9135 s390_reg_clobbered_rtx,
9136 regs_ever_clobbered);
9141 /* Determine the frame area which actually has to be accessed
9142 in the function epilogue. The values are stored at the
9143 given pointers AREA_BOTTOM (address of the lowest used stack
9144 address) and AREA_TOP (address of the first item which does
9145 not belong to the stack frame). */
9147 static void
9148 s390_frame_area (int *area_bottom, int *area_top)
9150 int b, t;
9152 b = INT_MAX;
9153 t = INT_MIN;
9155 if (cfun_frame_layout.first_restore_gpr != -1)
9157 b = (cfun_frame_layout.gprs_offset
9158 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9159 t = b + (cfun_frame_layout.last_restore_gpr
9160 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9163 if (TARGET_64BIT && cfun_save_high_fprs_p)
9165 b = MIN (b, cfun_frame_layout.f8_offset);
9166 t = MAX (t, (cfun_frame_layout.f8_offset
9167 + cfun_frame_layout.high_fprs * 8));
9170 if (!TARGET_64BIT)
9172 if (cfun_fpr_save_p (FPR4_REGNUM))
9174 b = MIN (b, cfun_frame_layout.f4_offset);
9175 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9177 if (cfun_fpr_save_p (FPR6_REGNUM))
9179 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9180 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9183 *area_bottom = b;
9184 *area_top = t;
9186 /* Update gpr_save_slots in the frame layout trying to make use of
9187 FPRs as GPR save slots.
9188 This is a helper routine of s390_register_info. */
9190 static void
9191 s390_register_info_gprtofpr ()
9193 int save_reg_slot = FPR0_REGNUM;
9194 int i, j;
9196 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9197 return;
9199 for (i = 15; i >= 6; i--)
9201 if (cfun_gpr_save_slot (i) == 0)
9202 continue;
9204 /* Advance to the next FP register which can be used as a
9205 GPR save slot. */
9206 while ((!call_really_used_regs[save_reg_slot]
9207 || df_regs_ever_live_p (save_reg_slot)
9208 || cfun_fpr_save_p (save_reg_slot))
9209 && FP_REGNO_P (save_reg_slot))
9210 save_reg_slot++;
9211 if (!FP_REGNO_P (save_reg_slot))
9213 /* We only want to use ldgr/lgdr if we can get rid of
9214 stm/lm entirely. So undo the gpr slot allocation in
9215 case we ran out of FPR save slots. */
9216 for (j = 6; j <= 15; j++)
9217 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9218 cfun_gpr_save_slot (j) = -1;
9219 break;
9221 cfun_gpr_save_slot (i) = save_reg_slot++;
9225 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9226 stdarg.
9227 This is a helper routine for s390_register_info. */
9229 static void
9230 s390_register_info_stdarg_fpr ()
9232 int i;
9233 int min_fpr;
9234 int max_fpr;
9236 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9237 f0-f4 for 64 bit. */
9238 if (!cfun->stdarg
9239 || !TARGET_HARD_FLOAT
9240 || !cfun->va_list_fpr_size
9241 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9242 return;
9244 min_fpr = crtl->args.info.fprs;
9245 max_fpr = min_fpr + cfun->va_list_fpr_size;
9246 if (max_fpr > FP_ARG_NUM_REG)
9247 max_fpr = FP_ARG_NUM_REG;
9249 for (i = min_fpr; i < max_fpr; i++)
9250 cfun_set_fpr_save (i + FPR0_REGNUM);
9253 /* Reserve the GPR save slots for GPRs which need to be saved due to
9254 stdarg.
9255 This is a helper routine for s390_register_info. */
9257 static void
9258 s390_register_info_stdarg_gpr ()
9260 int i;
9261 int min_gpr;
9262 int max_gpr;
9264 if (!cfun->stdarg
9265 || !cfun->va_list_gpr_size
9266 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9267 return;
9269 min_gpr = crtl->args.info.gprs;
9270 max_gpr = min_gpr + cfun->va_list_gpr_size;
9271 if (max_gpr > GP_ARG_NUM_REG)
9272 max_gpr = GP_ARG_NUM_REG;
9274 for (i = min_gpr; i < max_gpr; i++)
9275 cfun_gpr_save_slot (2 + i) = -1;
9278 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9279 for registers which need to be saved in function prologue.
9280 This function can be used until the insns emitted for save/restore
9281 of the regs are visible in the RTL stream. */
9283 static void
9284 s390_register_info ()
9286 int i, j;
9287 char clobbered_regs[32];
9289 gcc_assert (!epilogue_completed);
9291 if (reload_completed)
9292 /* After reload we rely on our own routine to determine which
9293 registers need saving. */
9294 s390_regs_ever_clobbered (clobbered_regs);
9295 else
9296 /* During reload we use regs_ever_live as a base since reload
9297 does changes in there which we otherwise would not be aware
9298 of. */
9299 for (i = 0; i < 32; i++)
9300 clobbered_regs[i] = df_regs_ever_live_p (i);
9302 for (i = 0; i < 32; i++)
9303 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9305 /* Mark the call-saved FPRs which need to be saved.
9306 This needs to be done before checking the special GPRs since the
9307 stack pointer usage depends on whether high FPRs have to be saved
9308 or not. */
9309 cfun_frame_layout.fpr_bitmap = 0;
9310 cfun_frame_layout.high_fprs = 0;
9311 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9312 if (clobbered_regs[i] && !call_really_used_regs[i])
9314 cfun_set_fpr_save (i);
9315 if (i >= FPR8_REGNUM)
9316 cfun_frame_layout.high_fprs++;
9319 if (flag_pic)
9320 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
9321 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
9323 clobbered_regs[BASE_REGNUM]
9324 |= (cfun->machine->base_reg
9325 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9327 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9328 |= !!frame_pointer_needed;
9330 /* On pre-z900 machines this decision might not be made until the
9331 machine-dependent reorg pass.
9332 save_return_addr_p will only be set on non-zarch machines, so
9333 there is no risk that r14 goes into an FPR instead of a stack
9334 slot. */
9335 clobbered_regs[RETURN_REGNUM]
9336 |= (!crtl->is_leaf
9337 || TARGET_TPF_PROFILING
9338 || cfun->machine->split_branches_pending_p
9339 || cfun_frame_layout.save_return_addr_p
9340 || crtl->calls_eh_return);
9342 clobbered_regs[STACK_POINTER_REGNUM]
9343 |= (!crtl->is_leaf
9344 || TARGET_TPF_PROFILING
9345 || cfun_save_high_fprs_p
9346 || get_frame_size () > 0
9347 || (reload_completed && cfun_frame_layout.frame_size > 0)
9348 || cfun->calls_alloca);
9350 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
9352 for (i = 6; i < 16; i++)
9353 if (clobbered_regs[i])
9354 cfun_gpr_save_slot (i) = -1;
9356 s390_register_info_stdarg_fpr ();
9357 s390_register_info_gprtofpr ();
9359 /* First find the range of GPRs to be restored. Vararg regs don't
9360 need to be restored so we do it before assigning slots to the
9361 vararg GPRs. */
9362 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9363 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9364 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9365 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9367 /* stdarg functions might need to save GPRs 2 to 6. This might
9368 override the GPR->FPR save decision made above for r6 since
9369 vararg regs must go to the stack. */
9370 s390_register_info_stdarg_gpr ();
9372 /* Now the range of GPRs which need saving. */
9373 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9374 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9375 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9376 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9379 /* This function is called by s390_optimize_prologue in order to get
9380 rid of unnecessary GPR save/restore instructions. The register info
9381 for the GPRs is re-computed and the ranges are re-calculated. */
9383 static void
9384 s390_optimize_register_info ()
9386 char clobbered_regs[32];
9387 int i, j;
9389 gcc_assert (epilogue_completed);
9390 gcc_assert (!cfun->machine->split_branches_pending_p);
9392 s390_regs_ever_clobbered (clobbered_regs);
9394 for (i = 0; i < 32; i++)
9395 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9397 /* There is still special treatment needed for cases invisible to
9398 s390_regs_ever_clobbered. */
9399 clobbered_regs[RETURN_REGNUM]
9400 |= (TARGET_TPF_PROFILING
9401 /* When expanding builtin_return_addr in ESA mode we do not
9402 know whether r14 will later be needed as scratch reg when
9403 doing branch splitting. So the builtin always accesses the
9404 r14 save slot and we need to stick to the save/restore
9405 decision for r14 even if it turns out that it didn't get
9406 clobbered. */
9407 || cfun_frame_layout.save_return_addr_p
9408 || crtl->calls_eh_return);
9410 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
9412 for (i = 6; i < 16; i++)
9413 if (!clobbered_regs[i])
9414 cfun_gpr_save_slot (i) = 0;
9416 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9417 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9418 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9419 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9421 s390_register_info_stdarg_gpr ();
9423 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9424 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9425 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9426 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9429 /* Fill cfun->machine with info about frame of current function. */
9431 static void
9432 s390_frame_info (void)
9434 HOST_WIDE_INT lowest_offset;
9436 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9437 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9439 /* The va_arg builtin uses a constant distance of 16 *
9440 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9441 pointer. So even if we are going to save the stack pointer in an
9442 FPR we need the stack space in order to keep the offsets
9443 correct. */
9444 if (cfun->stdarg && cfun_save_arg_fprs_p)
9446 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9448 if (cfun_frame_layout.first_save_gpr_slot == -1)
9449 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9452 cfun_frame_layout.frame_size = get_frame_size ();
9453 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9454 fatal_error (input_location,
9455 "total size of local variables exceeds architecture limit");
9457 if (!TARGET_PACKED_STACK)
9459 /* Fixed stack layout. */
9460 cfun_frame_layout.backchain_offset = 0;
9461 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9462 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9463 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9464 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9465 * UNITS_PER_LONG);
9467 else if (TARGET_BACKCHAIN)
9469 /* Kernel stack layout - packed stack, backchain, no float */
9470 gcc_assert (TARGET_SOFT_FLOAT);
9471 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9472 - UNITS_PER_LONG);
9474 /* The distance between the backchain and the return address
9475 save slot must not change. So we always need a slot for the
9476 stack pointer which resides in between. */
9477 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9479 cfun_frame_layout.gprs_offset
9480 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9482 /* FPRs will not be saved. Nevertheless pick sane values to
9483 keep area calculations valid. */
9484 cfun_frame_layout.f0_offset =
9485 cfun_frame_layout.f4_offset =
9486 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9488 else
9490 int num_fprs;
9492 /* Packed stack layout without backchain. */
9494 /* With stdarg FPRs need their dedicated slots. */
9495 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9496 : (cfun_fpr_save_p (FPR4_REGNUM) +
9497 cfun_fpr_save_p (FPR6_REGNUM)));
9498 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9500 num_fprs = (cfun->stdarg ? 2
9501 : (cfun_fpr_save_p (FPR0_REGNUM)
9502 + cfun_fpr_save_p (FPR2_REGNUM)));
9503 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9505 cfun_frame_layout.gprs_offset
9506 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9508 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9509 - cfun_frame_layout.high_fprs * 8);
9512 if (cfun_save_high_fprs_p)
9513 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9515 if (!crtl->is_leaf)
9516 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9518 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9519 sized area at the bottom of the stack. This is required also for
9520 leaf functions. When GCC generates a local stack reference it
9521 will always add STACK_POINTER_OFFSET to all these references. */
9522 if (crtl->is_leaf
9523 && !TARGET_TPF_PROFILING
9524 && cfun_frame_layout.frame_size == 0
9525 && !cfun->calls_alloca)
9526 return;
9528 /* Calculate the number of bytes we have used in our own register
9529 save area. With the packed stack layout we can re-use the
9530 remaining bytes for normal stack elements. */
9532 if (TARGET_PACKED_STACK)
9533 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9534 cfun_frame_layout.f4_offset),
9535 cfun_frame_layout.gprs_offset);
9536 else
9537 lowest_offset = 0;
9539 if (TARGET_BACKCHAIN)
9540 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9542 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9544 /* Under 31 bit, if an odd number of GPRs has to be saved, we have to
9545 adjust the frame size to sustain 8-byte alignment of stack
9546 frames. */
9547 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9548 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9549 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
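/* Worked example of the rounding above (assuming STACK_BOUNDARY is 64 bits,
   i.e. 8 bytes, as usual for this target): a raw frame_size of 92 becomes
   (92 + 7) & ~7 = 96, keeping stack frames 8-byte aligned even when an odd
   number of 31-bit GPR slots was added.  */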
9552 /* Generate frame layout. Fills in register and frame data for the current
9553 function in cfun->machine. This routine can be called multiple times;
9554 it will re-do the complete frame layout every time. */
9556 static void
9557 s390_init_frame_layout (void)
9559 HOST_WIDE_INT frame_size;
9560 int base_used;
9562 /* After LRA the frame layout is supposed to be read-only and should
9563 not be re-computed. */
9564 if (reload_completed)
9565 return;
9567 /* On S/390 machines, we may need to perform branch splitting, which
9568 will require both base and return address register. We have no
9569 choice but to assume we're going to need them until right at the
9570 end of the machine dependent reorg phase. */
9571 if (!TARGET_CPU_ZARCH)
9572 cfun->machine->split_branches_pending_p = true;
9574 do
9575 {
9576 frame_size = cfun_frame_layout.frame_size;
9578 /* Try to predict whether we'll need the base register. */
9579 base_used = cfun->machine->split_branches_pending_p
9580 || crtl->uses_const_pool
9581 || (!DISP_IN_RANGE (frame_size)
9582 && !CONST_OK_FOR_K (frame_size));
9584 /* Decide which register to use as literal pool base. In small
9585 leaf functions, try to use an unused call-clobbered register
9586 as base register to avoid save/restore overhead. */
9587 if (!base_used)
9588 cfun->machine->base_reg = NULL_RTX;
9589 else
9591 int br = 0;
9593 if (crtl->is_leaf)
9594 /* Prefer r5 (most likely to be free). */
9595 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9596 ;
9597 cfun->machine->base_reg =
9598 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9601 s390_register_info ();
9602 s390_frame_info ();
9603 }
9604 while (frame_size != cfun_frame_layout.frame_size);
9607 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9608 the TX is nonescaping. A transaction is considered escaping if
9609 there is at least one path from tbegin returning CC0 to the
9610 function exit block without a tend.
9612 The check so far has some limitations:
9613 - only single tbegin/tend BBs are supported
9614 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9615 - when CC is copied to a GPR and the CC0 check is done with the GPR
9616 this is not supported
9619 static void
9620 s390_optimize_nonescaping_tx (void)
9622 const unsigned int CC0 = 1 << 3;
9623 basic_block tbegin_bb = NULL;
9624 basic_block tend_bb = NULL;
9625 basic_block bb;
9626 rtx_insn *insn;
9627 bool result = true;
9628 int bb_index;
9629 rtx_insn *tbegin_insn = NULL;
9631 if (!cfun->machine->tbegin_p)
9632 return;
9634 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9636 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9638 if (!bb)
9639 continue;
9641 FOR_BB_INSNS (bb, insn)
9643 rtx ite, cc, pat, target;
9644 unsigned HOST_WIDE_INT mask;
9646 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9647 continue;
9649 pat = PATTERN (insn);
9651 if (GET_CODE (pat) == PARALLEL)
9652 pat = XVECEXP (pat, 0, 0);
9654 if (GET_CODE (pat) != SET
9655 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9656 continue;
9658 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9660 rtx_insn *tmp;
9662 tbegin_insn = insn;
9664 /* Just return if the tbegin doesn't have clobbers. */
9665 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9666 return;
9668 if (tbegin_bb != NULL)
9669 return;
9671 /* Find the next conditional jump. */
9672 for (tmp = NEXT_INSN (insn);
9673 tmp != NULL_RTX;
9674 tmp = NEXT_INSN (tmp))
9676 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9677 return;
9678 if (!JUMP_P (tmp))
9679 continue;
9681 ite = SET_SRC (PATTERN (tmp));
9682 if (GET_CODE (ite) != IF_THEN_ELSE)
9683 continue;
9685 cc = XEXP (XEXP (ite, 0), 0);
9686 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9687 || GET_MODE (cc) != CCRAWmode
9688 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9689 return;
9691 if (bb->succs->length () != 2)
9692 return;
9694 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9695 if (GET_CODE (XEXP (ite, 0)) == NE)
9696 mask ^= 0xf;
9698 if (mask == CC0)
9699 target = XEXP (ite, 1);
9700 else if (mask == (CC0 ^ 0xf))
9701 target = XEXP (ite, 2);
9702 else
9703 return;
9706 edge_iterator ei;
9707 edge e1, e2;
9709 ei = ei_start (bb->succs);
9710 e1 = ei_safe_edge (ei);
9711 ei_next (&ei);
9712 e2 = ei_safe_edge (ei);
9714 if (e2->flags & EDGE_FALLTHRU)
9716 e2 = e1;
9717 e1 = ei_safe_edge (ei);
9720 if (!(e1->flags & EDGE_FALLTHRU))
9721 return;
9723 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9725 if (tmp == BB_END (bb))
9726 break;
9730 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9732 if (tend_bb != NULL)
9733 return;
9734 tend_bb = bb;
9739 /* Either we successfully remove the FPR clobbers here or we are not
9740 able to do anything for this TX. Both cases don't qualify for
9741 another look. */
9742 cfun->machine->tbegin_p = false;
9744 if (tbegin_bb == NULL || tend_bb == NULL)
9745 return;
9747 calculate_dominance_info (CDI_POST_DOMINATORS);
9748 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
9749 free_dominance_info (CDI_POST_DOMINATORS);
9751 if (!result)
9752 return;
9754 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
9755 gen_rtvec (2,
9756 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9757 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9758 INSN_CODE (tbegin_insn) = -1;
9759 df_insn_rescan (tbegin_insn);
9761 return;
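/* Illustration of the CC mask handling above (hypothetical jump, for reading
   convenience only): for a branch of the form
   (if_then_else (eq (reg:CCRAW CC) (const_int 8)) (label) (pc)),
   mask is 8 == CC0, so the THEN arm is the CC0 (transaction started) path;
   with NE instead of EQ the mask is flipped to 7 and the roles of the two
   arms are exchanged.  */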
9764 /* Return true if it is legal to put a value with MODE into REGNO. */
9766 bool
9767 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9769 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
9770 return false;
9772 switch (REGNO_REG_CLASS (regno))
9774 case VEC_REGS:
9775 return ((GET_MODE_CLASS (mode) == MODE_INT
9776 && s390_class_max_nregs (VEC_REGS, mode) == 1)
9777 || mode == DFmode
9778 || s390_vector_mode_supported_p (mode));
9779 break;
9780 case FP_REGS:
9781 if (TARGET_VX
9782 && ((GET_MODE_CLASS (mode) == MODE_INT
9783 && s390_class_max_nregs (FP_REGS, mode) == 1)
9784 || mode == DFmode
9785 || s390_vector_mode_supported_p (mode)))
9786 return true;
9788 if (REGNO_PAIR_OK (regno, mode))
9790 if (mode == SImode || mode == DImode)
9791 return true;
9793 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
9794 return true;
9796 break;
9797 case ADDR_REGS:
9798 if (FRAME_REGNO_P (regno) && mode == Pmode)
9799 return true;
9801 /* fallthrough */
9802 case GENERAL_REGS:
9803 if (REGNO_PAIR_OK (regno, mode))
9805 if (TARGET_ZARCH
9806 || (mode != TFmode && mode != TCmode && mode != TDmode))
9807 return true;
9809 break;
9810 case CC_REGS:
9811 if (GET_MODE_CLASS (mode) == MODE_CC)
9812 return true;
9813 break;
9814 case ACCESS_REGS:
9815 if (REGNO_PAIR_OK (regno, mode))
9817 if (mode == SImode || mode == Pmode)
9818 return true;
9820 break;
9821 default:
9822 return false;
9825 return false;
9828 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
9830 bool
9831 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
9833 /* Once we've decided upon a register to use as base register, it must
9834 no longer be used for any other purpose. */
9835 if (cfun->machine->base_reg)
9836 if (REGNO (cfun->machine->base_reg) == old_reg
9837 || REGNO (cfun->machine->base_reg) == new_reg)
9838 return false;
9840 /* Prevent regrename from using call-saved regs which haven't
9841 actually been saved. This is necessary since regrename assumes
9842 the backend save/restore decisions are based on
9843 df_regs_ever_live. Since we have our own routine we have to tell
9844 regrename manually about it. */
9845 if (GENERAL_REGNO_P (new_reg)
9846 && !call_really_used_regs[new_reg]
9847 && cfun_gpr_save_slot (new_reg) == 0)
9848 return false;
9850 return true;
9853 /* Return nonzero if register REGNO can be used as a scratch register
9854 in peephole2. */
9856 static bool
9857 s390_hard_regno_scratch_ok (unsigned int regno)
9859 /* See s390_hard_regno_rename_ok. */
9860 if (GENERAL_REGNO_P (regno)
9861 && !call_really_used_regs[regno]
9862 && cfun_gpr_save_slot (regno) == 0)
9863 return false;
9865 return true;
9868 /* Maximum number of registers to represent a value of mode MODE
9869 in a register of class RCLASS. */
9872 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
9874 int reg_size;
9875 bool reg_pair_required_p = false;
9877 switch (rclass)
9879 case FP_REGS:
9880 case VEC_REGS:
9881 reg_size = TARGET_VX ? 16 : 8;
9883 /* TF and TD modes would fit into a VR but we put them into a
9884 register pair since we do not have 128bit FP instructions on
9885 full VRs. */
9886 if (TARGET_VX
9887 && SCALAR_FLOAT_MODE_P (mode)
9888 && GET_MODE_SIZE (mode) >= 16)
9889 reg_pair_required_p = true;
9891 /* Even if complex types would fit into a single FPR/VR we force
9892 them into a register pair to deal with the parts more easily.
9893 (FIXME: What about complex ints?) */
9894 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9895 reg_pair_required_p = true;
9896 break;
9897 case ACCESS_REGS:
9898 reg_size = 4;
9899 break;
9900 default:
9901 reg_size = UNITS_PER_WORD;
9902 break;
9905 if (reg_pair_required_p)
9906 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
9908 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
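/* Worked examples (illustrative only): TFmode (16 bytes) in FP_REGS with
   TARGET_VX has reg_size 16 but reg_pair_required_p set, giving
   2 * ((16 / 2 + 15) / 16) = 2 registers; without the vector facility
   reg_size is 8 and the plain formula yields (16 + 7) / 8 = 2 as well.
   SImode in ACCESS_REGS gives (4 + 3) / 4 = 1.  */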
9911 /* Return TRUE if changing mode from FROM to TO should not be allowed
9912 for register class CLASS. */
9915 s390_cannot_change_mode_class (machine_mode from_mode,
9916 machine_mode to_mode,
9917 enum reg_class rclass)
9919 machine_mode small_mode;
9920 machine_mode big_mode;
9922 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
9923 return 0;
9925 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
9927 small_mode = from_mode;
9928 big_mode = to_mode;
9930 else
9932 small_mode = to_mode;
9933 big_mode = from_mode;
9936 /* Values residing in VRs are little-endian style. All modes are
9937 placed left-aligned in an VR. This means that we cannot allow
9938 switching between modes with differing sizes. Also if the vector
9939 facility is available we still place TFmode values in VR register
9940 pairs, since the only instructions we have operating on TFmodes
9941 only deal with register pairs. Therefore we have to allow DFmode
9942 subregs of TFmodes to enable the TFmode splitters. */
9943 if (reg_classes_intersect_p (VEC_REGS, rclass)
9944 && (GET_MODE_SIZE (small_mode) < 8
9945 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
9946 return 1;
9948 /* Likewise for access registers, since they have only half the
9949 word size on 64-bit. */
9950 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
9951 return 1;
9953 return 0;
9956 /* Return true if we use LRA instead of reload pass. */
9957 static bool
9958 s390_lra_p (void)
9960 return s390_lra_flag;
9963 /* Return true if register FROM can be eliminated via register TO. */
9965 static bool
9966 s390_can_eliminate (const int from, const int to)
9968 /* On zSeries machines, we have not marked the base register as fixed.
9969 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
9970 If a function requires the base register, we say here that this
9971 elimination cannot be performed. This will cause reload to free
9972 up the base register (as if it were fixed). On the other hand,
9973 if the current function does *not* require the base register, we
9974 say here the elimination succeeds, which in turn allows reload
9975 to allocate the base register for any other purpose. */
9976 if (from == BASE_REGNUM && to == BASE_REGNUM)
9978 if (TARGET_CPU_ZARCH)
9980 s390_init_frame_layout ();
9981 return cfun->machine->base_reg == NULL_RTX;
9984 return false;
9987 /* Everything else must point into the stack frame. */
9988 gcc_assert (to == STACK_POINTER_REGNUM
9989 || to == HARD_FRAME_POINTER_REGNUM);
9991 gcc_assert (from == FRAME_POINTER_REGNUM
9992 || from == ARG_POINTER_REGNUM
9993 || from == RETURN_ADDRESS_POINTER_REGNUM);
9995 /* Make sure we actually saved the return address. */
9996 if (from == RETURN_ADDRESS_POINTER_REGNUM)
9997 if (!crtl->calls_eh_return
9998 && !cfun->stdarg
9999 && !cfun_frame_layout.save_return_addr_p)
10000 return false;
10002 return true;
10005 /* Return offset between register FROM and TO initially after prolog. */
10007 HOST_WIDE_INT
10008 s390_initial_elimination_offset (int from, int to)
10010 HOST_WIDE_INT offset;
10012 /* ??? Why are we called for non-eliminable pairs? */
10013 if (!s390_can_eliminate (from, to))
10014 return 0;
10016 switch (from)
10018 case FRAME_POINTER_REGNUM:
10019 offset = (get_frame_size()
10020 + STACK_POINTER_OFFSET
10021 + crtl->outgoing_args_size);
10022 break;
10024 case ARG_POINTER_REGNUM:
10025 s390_init_frame_layout ();
10026 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10027 break;
10029 case RETURN_ADDRESS_POINTER_REGNUM:
10030 s390_init_frame_layout ();
10032 if (cfun_frame_layout.first_save_gpr_slot == -1)
10034 /* If it turns out that for stdarg nothing went into the reg
10035 save area we also do not need the return address
10036 pointer. */
10037 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10038 return 0;
10040 gcc_unreachable ();
10043 /* In order to make the following work it is not necessary for
10044 r14 to have a save slot. It is sufficient if one other GPR
10045 got one. Since the GPRs are always stored without gaps we
10046 are able to calculate where the r14 save slot would
10047 reside. */
10048 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10049 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10050 UNITS_PER_LONG);
10051 break;
10053 case BASE_REGNUM:
10054 offset = 0;
10055 break;
10057 default:
10058 gcc_unreachable ();
10061 return offset;
10064 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10065 to register BASE. Return generated insn. */
10067 static rtx
10068 save_fpr (rtx base, int offset, int regnum)
10070 rtx addr;
10071 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10073 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10074 set_mem_alias_set (addr, get_varargs_alias_set ());
10075 else
10076 set_mem_alias_set (addr, get_frame_alias_set ());
10078 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10081 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10082 to register BASE. Return generated insn. */
10084 static rtx
10085 restore_fpr (rtx base, int offset, int regnum)
10087 rtx addr;
10088 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10089 set_mem_alias_set (addr, get_frame_alias_set ());
10091 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10094 /* Return true if REGNO is a global register, but not one
10095 of the special ones that need to be saved/restored anyway. */
10097 static inline bool
10098 global_not_special_regno_p (int regno)
10100 return (global_regs[regno]
10101 /* These registers are special and need to be
10102 restored in any case. */
10103 && !(regno == STACK_POINTER_REGNUM
10104 || regno == RETURN_REGNUM
10105 || regno == BASE_REGNUM
10106 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10109 /* Generate insn to save registers FIRST to LAST into
10110 the register save area located at offset OFFSET
10111 relative to register BASE. */
10113 static rtx
10114 save_gprs (rtx base, int offset, int first, int last)
10116 rtx addr, insn, note;
10117 int i;
10119 addr = plus_constant (Pmode, base, offset);
10120 addr = gen_rtx_MEM (Pmode, addr);
10122 set_mem_alias_set (addr, get_frame_alias_set ());
10124 /* Special-case single register. */
10125 if (first == last)
10127 if (TARGET_64BIT)
10128 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10129 else
10130 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10132 if (!global_not_special_regno_p (first))
10133 RTX_FRAME_RELATED_P (insn) = 1;
10134 return insn;
10138 insn = gen_store_multiple (addr,
10139 gen_rtx_REG (Pmode, first),
10140 GEN_INT (last - first + 1));
10142 if (first <= 6 && cfun->stdarg)
10143 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10145 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10147 if (first + i <= 6)
10148 set_mem_alias_set (mem, get_varargs_alias_set ());
10151 /* We need to set the FRAME_RELATED flag on all SETs
10152 inside the store-multiple pattern.
10154 However, we must not emit DWARF records for registers 2..5
10155 if they are stored for use by variable arguments ...
10157 ??? Unfortunately, it is not enough to simply not set the
10158 FRAME_RELATED flags for those SETs, because the first SET
10159 of the PARALLEL is always treated as if it had the flag
10160 set, even if it does not. Therefore we emit a new pattern
10161 without those registers as REG_FRAME_RELATED_EXPR note. */
10163 if (first >= 6 && !global_not_special_regno_p (first))
10165 rtx pat = PATTERN (insn);
10167 for (i = 0; i < XVECLEN (pat, 0); i++)
10168 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10169 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10170 0, i)))))
10171 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10173 RTX_FRAME_RELATED_P (insn) = 1;
10175 else if (last >= 6)
10177 int start;
10179 for (start = first >= 6 ? first : 6; start <= last; start++)
10180 if (!global_not_special_regno_p (start))
10181 break;
10183 if (start > last)
10184 return insn;
10186 addr = plus_constant (Pmode, base,
10187 offset + (start - first) * UNITS_PER_LONG);
10189 if (start == last)
10191 if (TARGET_64BIT)
10192 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10193 gen_rtx_REG (Pmode, start));
10194 else
10195 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10196 gen_rtx_REG (Pmode, start));
10197 note = PATTERN (note);
10199 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10200 RTX_FRAME_RELATED_P (insn) = 1;
10202 return insn;
10205 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10206 gen_rtx_REG (Pmode, start),
10207 GEN_INT (last - start + 1));
10208 note = PATTERN (note);
10210 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10212 for (i = 0; i < XVECLEN (note, 0); i++)
10213 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10214 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10215 0, i)))))
10216 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10218 RTX_FRAME_RELATED_P (insn) = 1;
10221 return insn;
10224 /* Generate insn to restore registers FIRST to LAST from
10225 the register save area located at offset OFFSET
10226 relative to register BASE. */
10228 static rtx
10229 restore_gprs (rtx base, int offset, int first, int last)
10231 rtx addr, insn;
10233 addr = plus_constant (Pmode, base, offset);
10234 addr = gen_rtx_MEM (Pmode, addr);
10235 set_mem_alias_set (addr, get_frame_alias_set ());
10237 /* Special-case single register. */
10238 if (first == last)
10240 if (TARGET_64BIT)
10241 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10242 else
10243 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10245 RTX_FRAME_RELATED_P (insn) = 1;
10246 return insn;
10249 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10250 addr,
10251 GEN_INT (last - first + 1));
10252 RTX_FRAME_RELATED_P (insn) = 1;
10253 return insn;
10256 /* Return insn sequence to load the GOT register. */
10258 static GTY(()) rtx got_symbol;
10259 rtx_insn *
10260 s390_load_got (void)
10262 rtx_insn *insns;
10264 /* We cannot use pic_offset_table_rtx here since this function is
10265 also used for non-pic code if __tls_get_offset is called, and in
10266 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
10267 is usable. */
10268 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10270 if (!got_symbol)
10272 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10273 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10276 start_sequence ();
10278 if (TARGET_CPU_ZARCH)
10280 emit_move_insn (got_rtx, got_symbol);
10282 else
10284 rtx offset;
10286 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10287 UNSPEC_LTREL_OFFSET);
10288 offset = gen_rtx_CONST (Pmode, offset);
10289 offset = force_const_mem (Pmode, offset);
10291 emit_move_insn (got_rtx, offset);
10293 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10294 UNSPEC_LTREL_BASE);
10295 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10297 emit_move_insn (got_rtx, offset);
10300 insns = get_insns ();
10301 end_sequence ();
10302 return insns;
10305 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10306 and the change to the stack pointer. */
10308 static void
10309 s390_emit_stack_tie (void)
10311 rtx mem = gen_frame_mem (BLKmode,
10312 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10314 emit_insn (gen_stack_tie (mem));
10317 /* Copy GPRS into FPR save slots. */
10319 static void
10320 s390_save_gprs_to_fprs (void)
10322 int i;
10324 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10325 return;
10327 for (i = 6; i < 16; i++)
10329 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10331 rtx_insn *insn =
10332 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10333 gen_rtx_REG (DImode, i));
10334 RTX_FRAME_RELATED_P (insn) = 1;
10335 /* This prevents dwarf2cfi from interpreting the set. If it
10336 did, it might emit def_cfa_register info making an FPR the
10337 new CFA. */
10338 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10343 /* Restore GPRs from FPR save slots. */
10345 static void
10346 s390_restore_gprs_from_fprs (void)
10348 int i;
10350 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10351 return;
10353 for (i = 6; i < 16; i++)
10355 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10357 rtx_insn *insn =
10358 emit_move_insn (gen_rtx_REG (DImode, i),
10359 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
10360 df_set_regs_ever_live (i, true);
10361 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10362 if (i == STACK_POINTER_REGNUM)
10363 add_reg_note (insn, REG_CFA_DEF_CFA,
10364 plus_constant (Pmode, stack_pointer_rtx,
10365 STACK_POINTER_OFFSET));
10366 RTX_FRAME_RELATED_P (insn) = 1;
10372 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10373 generation. */
10375 namespace {
10377 const pass_data pass_data_s390_early_mach =
10379 RTL_PASS, /* type */
10380 "early_mach", /* name */
10381 OPTGROUP_NONE, /* optinfo_flags */
10382 TV_MACH_DEP, /* tv_id */
10383 0, /* properties_required */
10384 0, /* properties_provided */
10385 0, /* properties_destroyed */
10386 0, /* todo_flags_start */
10387 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10390 class pass_s390_early_mach : public rtl_opt_pass
10392 public:
10393 pass_s390_early_mach (gcc::context *ctxt)
10394 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10397 /* opt_pass methods: */
10398 virtual unsigned int execute (function *);
10400 }; // class pass_s390_early_mach
10402 unsigned int
10403 pass_s390_early_mach::execute (function *fun)
10405 rtx_insn *insn;
10407 /* Try to get rid of the FPR clobbers. */
10408 s390_optimize_nonescaping_tx ();
10410 /* Re-compute register info. */
10411 s390_register_info ();
10413 /* If we're using a base register, ensure that it is always valid for
10414 the first non-prologue instruction. */
10415 if (fun->machine->base_reg)
10416 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10418 /* Annotate all constant pool references to let the scheduler know
10419 they implicitly use the base register. */
10420 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10421 if (INSN_P (insn))
10423 annotate_constant_pool_refs (&PATTERN (insn));
10424 df_insn_rescan (insn);
10426 return 0;
10429 } // anon namespace
10431 /* Expand the prologue into a bunch of separate insns. */
10433 void
10434 s390_emit_prologue (void)
10436 rtx insn, addr;
10437 rtx temp_reg;
10438 int i;
10439 int offset;
10440 int next_fpr = 0;
10442 /* Choose the best register to use as a temporary within the prologue.
10443 See below for why TPF must use register 1. */
10445 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10446 && !crtl->is_leaf
10447 && !TARGET_TPF_PROFILING)
10448 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10449 else
10450 temp_reg = gen_rtx_REG (Pmode, 1);
10452 s390_save_gprs_to_fprs ();
10454 /* Save call saved gprs. */
10455 if (cfun_frame_layout.first_save_gpr != -1)
10457 insn = save_gprs (stack_pointer_rtx,
10458 cfun_frame_layout.gprs_offset +
10459 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10460 - cfun_frame_layout.first_save_gpr_slot),
10461 cfun_frame_layout.first_save_gpr,
10462 cfun_frame_layout.last_save_gpr);
10463 emit_insn (insn);
10466 /* Dummy insn to mark literal pool slot. */
10468 if (cfun->machine->base_reg)
10469 emit_insn (gen_main_pool (cfun->machine->base_reg));
10471 offset = cfun_frame_layout.f0_offset;
10473 /* Save f0 and f2. */
10474 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10476 if (cfun_fpr_save_p (i))
10478 save_fpr (stack_pointer_rtx, offset, i);
10479 offset += 8;
10481 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10482 offset += 8;
10485 /* Save f4 and f6. */
10486 offset = cfun_frame_layout.f4_offset;
10487 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10489 if (cfun_fpr_save_p (i))
10491 insn = save_fpr (stack_pointer_rtx, offset, i);
10492 offset += 8;
10494 /* If f4 and f6 are call-clobbered they are saved only because of
10495 stdarg and therefore are not frame related. */
10496 if (!call_really_used_regs[i])
10497 RTX_FRAME_RELATED_P (insn) = 1;
10499 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10500 offset += 8;
10503 if (TARGET_PACKED_STACK
10504 && cfun_save_high_fprs_p
10505 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10507 offset = (cfun_frame_layout.f8_offset
10508 + (cfun_frame_layout.high_fprs - 1) * 8);
10510 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10511 if (cfun_fpr_save_p (i))
10513 insn = save_fpr (stack_pointer_rtx, offset, i);
10515 RTX_FRAME_RELATED_P (insn) = 1;
10516 offset -= 8;
10518 if (offset >= cfun_frame_layout.f8_offset)
10519 next_fpr = i;
10522 if (!TARGET_PACKED_STACK)
10523 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10525 if (flag_stack_usage_info)
10526 current_function_static_stack_size = cfun_frame_layout.frame_size;
10528 /* Decrement stack pointer. */
10530 if (cfun_frame_layout.frame_size > 0)
10532 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10533 rtx real_frame_off;
10535 if (s390_stack_size)
10537 HOST_WIDE_INT stack_guard;
10539 if (s390_stack_guard)
10540 stack_guard = s390_stack_guard;
10541 else
10543 /* If no value for the stack guard is provided, the smallest power of 2
10544 larger than the current frame size is chosen. */
10545 stack_guard = 1;
10546 while (stack_guard < cfun_frame_layout.frame_size)
10547 stack_guard <<= 1;
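/* For example, a frame size of 6000 bytes would make this loop stop at a
   stack_guard of 8192, the smallest power of 2 not below the frame size. */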
10550 if (cfun_frame_layout.frame_size >= s390_stack_size)
10552 warning (0, "frame size of function %qs is %wd"
10553 " bytes exceeding user provided stack limit of "
10554 "%d bytes. "
10555 "An unconditional trap is added.",
10556 current_function_name(), cfun_frame_layout.frame_size,
10557 s390_stack_size);
10558 emit_insn (gen_trap ());
10559 emit_barrier ();
10561 else
10563 /* stack_guard has to be smaller than s390_stack_size.
10564 Otherwise we would emit an AND with zero which would
10565 not match the test under mask pattern. */
10566 if (stack_guard >= s390_stack_size)
10568 warning (0, "frame size of function %qs is %wd"
10569 " bytes which is more than half the stack size. "
10570 "The dynamic check would not be reliable. "
10571 "No check emitted for this function.",
10572 current_function_name(),
10573 cfun_frame_layout.frame_size);
10575 else
10577 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10578 & ~(stack_guard - 1));
10580 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10581 GEN_INT (stack_check_mask));
10582 if (TARGET_64BIT)
10583 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10584 t, const0_rtx),
10585 t, const0_rtx, const0_rtx));
10586 else
10587 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10588 t, const0_rtx),
10589 t, const0_rtx, const0_rtx));
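/* As an illustration (option values assumed for this sketch): with
   -mstack-size=65536 and -mstack-guard=4096 the mask above becomes
   0xffff & ~0xfff = 0xf000. The conditional trap then fires whenever all
   stack pointer bits selected by the mask are zero, i.e. when the stack
   pointer has entered the guard area at the bottom of the stack block. */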
10594 if (s390_warn_framesize > 0
10595 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10596 warning (0, "frame size of %qs is %wd bytes",
10597 current_function_name (), cfun_frame_layout.frame_size);
10599 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10600 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10602 /* Save incoming stack pointer into temp reg. */
10603 if (TARGET_BACKCHAIN || next_fpr)
10604 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10606 /* Subtract frame size from stack pointer. */
10608 if (DISP_IN_RANGE (INTVAL (frame_off)))
10610 insn = gen_rtx_SET (stack_pointer_rtx,
10611 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10612 frame_off));
10613 insn = emit_insn (insn);
10615 else
10617 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10618 frame_off = force_const_mem (Pmode, frame_off);
10620 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10621 annotate_constant_pool_refs (&PATTERN (insn));
10624 RTX_FRAME_RELATED_P (insn) = 1;
10625 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10626 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10627 gen_rtx_SET (stack_pointer_rtx,
10628 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10629 real_frame_off)));
10631 /* Set backchain. */
10633 if (TARGET_BACKCHAIN)
10635 if (cfun_frame_layout.backchain_offset)
10636 addr = gen_rtx_MEM (Pmode,
10637 plus_constant (Pmode, stack_pointer_rtx,
10638 cfun_frame_layout.backchain_offset));
10639 else
10640 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10641 set_mem_alias_set (addr, get_frame_alias_set ());
10642 insn = emit_insn (gen_move_insn (addr, temp_reg));
10645 /* If we support non-call exceptions (e.g. for Java),
10646 we need to make sure the backchain pointer is set up
10647 before any possibly trapping memory access. */
10648 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10650 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10651 emit_clobber (addr);
10655 /* Save fprs 8 - 15 (64 bit ABI). */
10657 if (cfun_save_high_fprs_p && next_fpr)
10659 /* If the stack might be accessed through a different register
10660 we have to make sure that the stack pointer decrement is not
10661 moved below the use of the stack slots. */
10662 s390_emit_stack_tie ();
10664 insn = emit_insn (gen_add2_insn (temp_reg,
10665 GEN_INT (cfun_frame_layout.f8_offset)));
10667 offset = 0;
10669 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10670 if (cfun_fpr_save_p (i))
10672 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10673 cfun_frame_layout.frame_size
10674 + cfun_frame_layout.f8_offset
10675 + offset);
10677 insn = save_fpr (temp_reg, offset, i);
10678 offset += 8;
10679 RTX_FRAME_RELATED_P (insn) = 1;
10680 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10681 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10682 gen_rtx_REG (DFmode, i)));
10686 /* Set frame pointer, if needed. */
10688 if (frame_pointer_needed)
10690 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10691 RTX_FRAME_RELATED_P (insn) = 1;
10694 /* Set up got pointer, if needed. */
10696 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10698 rtx_insn *insns = s390_load_got ();
10700 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10701 annotate_constant_pool_refs (&PATTERN (insn));
10703 emit_insn (insns);
10706 if (TARGET_TPF_PROFILING)
10708 /* Generate a BAS instruction to serve as a function
10709 entry intercept to facilitate the use of tracing
10710 algorithms located at the branch target. */
10711 emit_insn (gen_prologue_tpf ());
10713 /* Emit a blockage here so that all code
10714 lies between the profiling mechanisms. */
10715 emit_insn (gen_blockage ());
10719 /* Expand the epilogue into a bunch of separate insns. */
10721 void
10722 s390_emit_epilogue (bool sibcall)
10724 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10725 int area_bottom, area_top, offset = 0;
10726 int next_offset;
10727 rtvec p;
10728 int i;
10730 if (TARGET_TPF_PROFILING)
10733 /* Generate a BAS instruction to serve as a function
10734 exit intercept to facilitate the use of tracing
10735 algorithms located at the branch target. */
10737 /* Emit a blockage here so that all code
10738 lies between the profiling mechanisms. */
10739 emit_insn (gen_blockage ());
10741 emit_insn (gen_epilogue_tpf ());
10744 /* Check whether to use frame or stack pointer for restore. */
10746 frame_pointer = (frame_pointer_needed
10747 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10749 s390_frame_area (&area_bottom, &area_top);
10751 /* Check whether we can access the register save area.
10752 If not, increment the frame pointer as required. */
10754 if (area_top <= area_bottom)
10756 /* Nothing to restore. */
10758 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
10759 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
10761 /* Area is in range. */
10762 offset = cfun_frame_layout.frame_size;
10764 else
10766 rtx insn, frame_off, cfa;
10768 offset = area_bottom < 0 ? -area_bottom : 0;
10769 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10771 cfa = gen_rtx_SET (frame_pointer,
10772 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10773 if (DISP_IN_RANGE (INTVAL (frame_off)))
10775 insn = gen_rtx_SET (frame_pointer,
10776 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10777 insn = emit_insn (insn);
10779 else
10781 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10782 frame_off = force_const_mem (Pmode, frame_off);
10784 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
10785 annotate_constant_pool_refs (&PATTERN (insn));
10787 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
10788 RTX_FRAME_RELATED_P (insn) = 1;
10791 /* Restore call saved fprs. */
10793 if (TARGET_64BIT)
10795 if (cfun_save_high_fprs_p)
10797 next_offset = cfun_frame_layout.f8_offset;
10798 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10800 if (cfun_fpr_save_p (i))
10802 restore_fpr (frame_pointer,
10803 offset + next_offset, i);
10804 cfa_restores
10805 = alloc_reg_note (REG_CFA_RESTORE,
10806 gen_rtx_REG (DFmode, i), cfa_restores);
10807 next_offset += 8;
10813 else
10815 next_offset = cfun_frame_layout.f4_offset;
10816 /* f4, f6 */
10817 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10819 if (cfun_fpr_save_p (i))
10821 restore_fpr (frame_pointer,
10822 offset + next_offset, i);
10823 cfa_restores
10824 = alloc_reg_note (REG_CFA_RESTORE,
10825 gen_rtx_REG (DFmode, i), cfa_restores);
10826 next_offset += 8;
10828 else if (!TARGET_PACKED_STACK)
10829 next_offset += 8;
10834 /* Return register. */
10836 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10838 /* Restore call saved gprs. */
10840 if (cfun_frame_layout.first_restore_gpr != -1)
10842 rtx insn, addr;
10843 int i;
10845 /* Check for global registers and store them to the stack
10846 locations from which they will be restored. */
10848 for (i = cfun_frame_layout.first_restore_gpr;
10849 i <= cfun_frame_layout.last_restore_gpr;
10850 i++)
10852 if (global_not_special_regno_p (i))
10854 addr = plus_constant (Pmode, frame_pointer,
10855 offset + cfun_frame_layout.gprs_offset
10856 + (i - cfun_frame_layout.first_save_gpr_slot)
10857 * UNITS_PER_LONG);
10858 addr = gen_rtx_MEM (Pmode, addr);
10859 set_mem_alias_set (addr, get_frame_alias_set ());
10860 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
10862 else
10863 cfa_restores
10864 = alloc_reg_note (REG_CFA_RESTORE,
10865 gen_rtx_REG (Pmode, i), cfa_restores);
10868 if (! sibcall)
10870 /* Fetch return address from stack before load multiple,
10871 this helps scheduling.
10873 Only do this if we already decided that r14 needs to be
10874 saved to a stack slot. (And not just because r14 happens to
10875 be in between two GPRs which need saving.) Otherwise it
10876 would be difficult to take that decision back in
10877 s390_optimize_prologue. */
10878 if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
10880 int return_regnum = find_unused_clobbered_reg();
10881 if (!return_regnum)
10882 return_regnum = 4;
10883 return_reg = gen_rtx_REG (Pmode, return_regnum);
10885 addr = plus_constant (Pmode, frame_pointer,
10886 offset + cfun_frame_layout.gprs_offset
10887 + (RETURN_REGNUM
10888 - cfun_frame_layout.first_save_gpr_slot)
10889 * UNITS_PER_LONG);
10890 addr = gen_rtx_MEM (Pmode, addr);
10891 set_mem_alias_set (addr, get_frame_alias_set ());
10892 emit_move_insn (return_reg, addr);
10894 /* Once we have done that optimization we have to make sure
10895 s390_optimize_prologue does not try to remove the
10896 store of r14 since we will not be able to find the
10897 load issued here. */
10898 cfun_frame_layout.save_return_addr_p = true;
10902 insn = restore_gprs (frame_pointer,
10903 offset + cfun_frame_layout.gprs_offset
10904 + (cfun_frame_layout.first_restore_gpr
10905 - cfun_frame_layout.first_save_gpr_slot)
10906 * UNITS_PER_LONG,
10907 cfun_frame_layout.first_restore_gpr,
10908 cfun_frame_layout.last_restore_gpr);
10909 insn = emit_insn (insn);
10910 REG_NOTES (insn) = cfa_restores;
10911 add_reg_note (insn, REG_CFA_DEF_CFA,
10912 plus_constant (Pmode, stack_pointer_rtx,
10913 STACK_POINTER_OFFSET));
10914 RTX_FRAME_RELATED_P (insn) = 1;
10917 s390_restore_gprs_from_fprs ();
10919 if (! sibcall)
10922 /* Return to caller. */
10924 p = rtvec_alloc (2);
10926 RTVEC_ELT (p, 0) = ret_rtx;
10927 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
10928 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
10932 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
10934 static void
10935 s300_set_up_by_prologue (hard_reg_set_container *regs)
10937 if (cfun->machine->base_reg
10938 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10939 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
10942 /* Return true if the function can use simple_return to return outside
10943 of a shrink-wrapped region. At present shrink-wrapping is supported
10944 in all cases. */
10946 bool
10947 s390_can_use_simple_return_insn (void)
10949 return true;
10952 /* Return true if the epilogue is guaranteed to contain only a return
10953 instruction and if a direct return can therefore be used instead.
10954 One of the main advantages of using direct return instructions
10955 is that we can then use conditional returns. */
10957 bool
10958 s390_can_use_return_insn (void)
10960 int i;
10962 if (!reload_completed)
10963 return false;
10965 if (crtl->profile)
10966 return false;
10968 if (TARGET_TPF_PROFILING)
10969 return false;
10971 for (i = 0; i < 16; i++)
10972 if (cfun_gpr_save_slot (i))
10973 return false;
10975 /* For 31 bit this is not covered by the frame_size check below
10976 since f4, f6 are saved in the register save area without needing
10977 additional stack space. */
10978 if (!TARGET_64BIT
10979 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
10980 return false;
10982 if (cfun->machine->base_reg
10983 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10984 return false;
10986 return cfun_frame_layout.frame_size == 0;
10989 /* The VX ABI differs for vararg functions. Therefore we need the
10990 prototype of the callee to be available when passing vector type
10991 values. */
10992 static const char *
10993 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
10995 return ((TARGET_VX_ABI
10996 && typelist == 0
10997 && VECTOR_TYPE_P (TREE_TYPE (val))
10998 && (funcdecl == NULL_TREE
10999 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11000 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11001 ? N_("Vector argument passed to unprototyped function")
11002 : NULL);
11006 /* Return the size in bytes of a function argument of
11007 type TYPE and/or mode MODE. At least one of TYPE or
11008 MODE must be specified. */
11010 static int
11011 s390_function_arg_size (machine_mode mode, const_tree type)
11013 if (type)
11014 return int_size_in_bytes (type);
11016 /* No type info available for some library calls ... */
11017 if (mode != BLKmode)
11018 return GET_MODE_SIZE (mode);
11020 /* If we have neither type nor mode, abort */
11021 gcc_unreachable ();
11024 /* Return true if a function argument of type TYPE and mode MODE
11025 is to be passed in a vector register, if available. */
11027 bool
11028 s390_function_arg_vector (machine_mode mode, const_tree type)
11030 if (!TARGET_VX_ABI)
11031 return false;
11033 if (s390_function_arg_size (mode, type) > 16)
11034 return false;
11036 /* No type info available for some library calls ... */
11037 if (!type)
11038 return VECTOR_MODE_P (mode);
11040 /* The ABI says that record types with a single member are treated
11041 just like that member would be. */
11042 while (TREE_CODE (type) == RECORD_TYPE)
11044 tree field, single = NULL_TREE;
11046 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11048 if (TREE_CODE (field) != FIELD_DECL)
11049 continue;
11051 if (single == NULL_TREE)
11052 single = TREE_TYPE (field);
11053 else
11054 return false;
11057 if (single == NULL_TREE)
11058 return false;
11059 else
11061 /* If the field declaration adds extra bytes, e.g. due to
11062 padding, this is not accepted as a vector type. */
11063 if (int_size_in_bytes (single) <= 0
11064 || int_size_in_bytes (single) != int_size_in_bytes (type))
11065 return false;
11066 type = single;
11070 return VECTOR_TYPE_P (type);
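/* For example, under the VX ABI a struct with a single "vector double"
   member and no padding would be passed like a plain vector double,
   whereas a struct with two members, or one with trailing padding, would
   not be treated as a vector argument. */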
11073 /* Return true if a function argument of type TYPE and mode MODE
11074 is to be passed in a floating-point register, if available. */
11076 static bool
11077 s390_function_arg_float (machine_mode mode, const_tree type)
11079 if (s390_function_arg_size (mode, type) > 8)
11080 return false;
11082 /* Soft-float changes the ABI: no floating-point registers are used. */
11083 if (TARGET_SOFT_FLOAT)
11084 return false;
11086 /* No type info available for some library calls ... */
11087 if (!type)
11088 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11090 /* The ABI says that record types with a single member are treated
11091 just like that member would be. */
11092 while (TREE_CODE (type) == RECORD_TYPE)
11094 tree field, single = NULL_TREE;
11096 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11098 if (TREE_CODE (field) != FIELD_DECL)
11099 continue;
11101 if (single == NULL_TREE)
11102 single = TREE_TYPE (field);
11103 else
11104 return false;
11107 if (single == NULL_TREE)
11108 return false;
11109 else
11110 type = single;
11113 return TREE_CODE (type) == REAL_TYPE;
11116 /* Return true if a function argument of type TYPE and mode MODE
11117 is to be passed in an integer register, or a pair of integer
11118 registers, if available. */
11120 static bool
11121 s390_function_arg_integer (machine_mode mode, const_tree type)
11123 int size = s390_function_arg_size (mode, type);
11124 if (size > 8)
11125 return false;
11127 /* No type info available for some library calls ... */
11128 if (!type)
11129 return GET_MODE_CLASS (mode) == MODE_INT
11130 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11132 /* We accept small integral (and similar) types. */
11133 if (INTEGRAL_TYPE_P (type)
11134 || POINTER_TYPE_P (type)
11135 || TREE_CODE (type) == NULLPTR_TYPE
11136 || TREE_CODE (type) == OFFSET_TYPE
11137 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11138 return true;
11140 /* We also accept structs of size 1, 2, 4, 8 that are not
11141 passed in floating-point registers. */
11142 if (AGGREGATE_TYPE_P (type)
11143 && exact_log2 (size) >= 0
11144 && !s390_function_arg_float (mode, type))
11145 return true;
11147 return false;
11150 /* Return 1 if a function argument of type TYPE and mode MODE
11151 is to be passed by reference. The ABI specifies that only
11152 structures of size 1, 2, 4, or 8 bytes are passed by value,
11153 all other structures (and complex numbers) are passed by
11154 reference. */
11156 static bool
11157 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11158 machine_mode mode, const_tree type,
11159 bool named ATTRIBUTE_UNUSED)
11161 int size = s390_function_arg_size (mode, type);
11163 if (s390_function_arg_vector (mode, type))
11164 return false;
11166 if (size > 8)
11167 return true;
11169 if (type)
11171 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11172 return true;
11174 if (TREE_CODE (type) == COMPLEX_TYPE
11175 || TREE_CODE (type) == VECTOR_TYPE)
11176 return true;
11179 return false;
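/* For example, a struct of 1, 2, 4 or 8 bytes is passed by value (in a
   register or register pair), while a 3-byte or 12-byte struct and any
   _Complex value are passed by reference. */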
11182 /* Update the data in CUM to advance over an argument of mode MODE and
11183 data type TYPE. (TYPE is null for libcalls where that information
11184 may not be available.). The boolean NAMED specifies whether the
11185 argument is a named argument (as opposed to an unnamed argument
11186 matching an ellipsis). */
11188 static void
11189 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11190 const_tree type, bool named)
11192 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11194 if (s390_function_arg_vector (mode, type))
11196 /* We are called for unnamed vector stdarg arguments which are
11197 passed on the stack. In this case this hook does not have to
11198 do anything since stack arguments are tracked by common
11199 code. */
11200 if (!named)
11201 return;
11202 cum->vrs += 1;
11204 else if (s390_function_arg_float (mode, type))
11206 cum->fprs += 1;
11208 else if (s390_function_arg_integer (mode, type))
11210 int size = s390_function_arg_size (mode, type);
11211 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11213 else
11214 gcc_unreachable ();
11217 /* Define where to put the arguments to a function.
11218 Value is zero to push the argument on the stack,
11219 or a hard register in which to store the argument.
11221 MODE is the argument's machine mode.
11222 TYPE is the data type of the argument (as a tree).
11223 This is null for libcalls where that information may
11224 not be available.
11225 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11226 the preceding args and about the function being called.
11227 NAMED is nonzero if this argument is a named parameter
11228 (otherwise it is an extra parameter matching an ellipsis).
11230 On S/390, we use general purpose registers 2 through 6 to
11231 pass integer, pointer, and certain structure arguments, and
11232 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11233 to pass floating point arguments. All remaining arguments
11234 are pushed to the stack. */
11236 static rtx
11237 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11238 const_tree type, bool named)
11240 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11242 if (!named)
11243 s390_check_type_for_vector_abi (type, true, false);
11245 if (s390_function_arg_vector (mode, type))
11247 /* Vector arguments being part of the ellipsis are passed on the
11248 stack. */
11249 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11250 return NULL_RTX;
11252 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11254 else if (s390_function_arg_float (mode, type))
11256 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11257 return NULL_RTX;
11258 else
11259 return gen_rtx_REG (mode, cum->fprs + 16);
11261 else if (s390_function_arg_integer (mode, type))
11263 int size = s390_function_arg_size (mode, type);
11264 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11266 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11267 return NULL_RTX;
11268 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11269 return gen_rtx_REG (mode, cum->gprs + 2);
11270 else if (n_gprs == 2)
11272 rtvec p = rtvec_alloc (2);
11274 RTVEC_ELT (p, 0)
11275 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11276 const0_rtx);
11277 RTVEC_ELT (p, 1)
11278 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11279 GEN_INT (4));
11281 return gen_rtx_PARALLEL (mode, p);
11285 /* After the real arguments, expand_call calls us once again
11286 with a void_type_node type. Whatever we return here is
11287 passed as operand 2 to the call expanders.
11289 We don't need this feature ... */
11290 else if (type == void_type_node)
11291 return const0_rtx;
11293 gcc_unreachable ();
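/* For example, on a 31-bit target a DImode integer argument starting in
   the first free GPR is returned as a PARALLEL pairing r2 (bytes 0-3) and
   r3 (bytes 4-7), matching the register pair required by the ABI. */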
11296 /* Return true if return values of type TYPE should be returned
11297 in a memory buffer whose address is passed by the caller as
11298 hidden first argument. */
11300 static bool
11301 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11303 /* We accept small integral (and similar) types. */
11304 if (INTEGRAL_TYPE_P (type)
11305 || POINTER_TYPE_P (type)
11306 || TREE_CODE (type) == OFFSET_TYPE
11307 || TREE_CODE (type) == REAL_TYPE)
11308 return int_size_in_bytes (type) > 8;
11310 /* vector types which fit into a VR. */
11311 if (TARGET_VX_ABI
11312 && VECTOR_TYPE_P (type)
11313 && int_size_in_bytes (type) <= 16)
11314 return false;
11316 /* Aggregates and similar constructs are always returned
11317 in memory. */
11318 if (AGGREGATE_TYPE_P (type)
11319 || TREE_CODE (type) == COMPLEX_TYPE
11320 || VECTOR_TYPE_P (type))
11321 return true;
11323 /* ??? We get called on all sorts of random stuff from
11324 aggregate_value_p. We can't abort, but it's not clear
11325 what's safe to return. Pretend it's a struct I guess. */
11326 return true;
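/* For example, a long or a double is returned in a register, and a
   16-byte vector is returned in a vector register under the VX ABI,
   while any struct or _Complex value is returned through a hidden buffer
   whose address the caller passes as an implicit first argument. */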
11329 /* Function arguments and return values are promoted to word size. */
11331 static machine_mode
11332 s390_promote_function_mode (const_tree type, machine_mode mode,
11333 int *punsignedp,
11334 const_tree fntype ATTRIBUTE_UNUSED,
11335 int for_return ATTRIBUTE_UNUSED)
11337 if (INTEGRAL_MODE_P (mode)
11338 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11340 if (type != NULL_TREE && POINTER_TYPE_P (type))
11341 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11342 return Pmode;
11345 return mode;
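/* For example, on a 64-bit target a 32-bit "int" argument (SImode) is
   widened to a full 64-bit register (Pmode), while modes that are already
   at least UNITS_PER_LONG wide are left untouched. */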
11348 /* Define where to return a (scalar) value of type RET_TYPE.
11349 If RET_TYPE is null, define where to return a (scalar)
11350 value of mode MODE from a libcall. */
11352 static rtx
11353 s390_function_and_libcall_value (machine_mode mode,
11354 const_tree ret_type,
11355 const_tree fntype_or_decl,
11356 bool outgoing ATTRIBUTE_UNUSED)
11358 /* For vector return types it is important to use the RET_TYPE
11359 argument whenever available since the middle-end might have
11360 changed the mode to a scalar mode. */
11361 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11362 || (!ret_type && VECTOR_MODE_P (mode)));
11364 /* For normal functions perform the promotion as
11365 promote_function_mode would do. */
11366 if (ret_type)
11368 int unsignedp = TYPE_UNSIGNED (ret_type);
11369 mode = promote_function_mode (ret_type, mode, &unsignedp,
11370 fntype_or_decl, 1);
11373 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11374 || SCALAR_FLOAT_MODE_P (mode)
11375 || (TARGET_VX_ABI && vector_ret_type_p));
11376 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11378 if (TARGET_VX_ABI && vector_ret_type_p)
11379 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11380 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11381 return gen_rtx_REG (mode, 16);
11382 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11383 || UNITS_PER_LONG == UNITS_PER_WORD)
11384 return gen_rtx_REG (mode, 2);
11385 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11387 /* This case is triggered when returning a 64 bit value with
11388 -m31 -mzarch. Although the value would fit into a single
11389 register it has to be forced into a 32 bit register pair in
11390 order to match the ABI. */
11391 rtvec p = rtvec_alloc (2);
11393 RTVEC_ELT (p, 0)
11394 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11395 RTVEC_ELT (p, 1)
11396 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11398 return gen_rtx_PARALLEL (mode, p);
11401 gcc_unreachable ();
11404 /* Define where to return a scalar return value of type RET_TYPE. */
11406 static rtx
11407 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11408 bool outgoing)
11410 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11411 fn_decl_or_type, outgoing);
11414 /* Define where to return a scalar libcall return value of mode
11415 MODE. */
11417 static rtx
11418 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11420 return s390_function_and_libcall_value (mode, NULL_TREE,
11421 NULL_TREE, true);
11425 /* Create and return the va_list datatype.
11427 On S/390, va_list is an array type equivalent to
11429 typedef struct __va_list_tag
11431 long __gpr;
11432 long __fpr;
11433 void *__overflow_arg_area;
11434 void *__reg_save_area;
11435 } va_list[1];
11437 where __gpr and __fpr hold the number of general purpose
11438 or floating point arguments used up to now, respectively,
11439 __overflow_arg_area points to the stack location of the
11440 next argument passed on the stack, and __reg_save_area
11441 always points to the start of the register area in the
11442 call frame of the current function. The function prologue
11443 saves all registers used for argument passing into this
11444 area if the function uses variable arguments. */
11446 static tree
11447 s390_build_builtin_va_list (void)
11449 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11451 record = lang_hooks.types.make_type (RECORD_TYPE);
11453 type_decl =
11454 build_decl (BUILTINS_LOCATION,
11455 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11457 f_gpr = build_decl (BUILTINS_LOCATION,
11458 FIELD_DECL, get_identifier ("__gpr"),
11459 long_integer_type_node);
11460 f_fpr = build_decl (BUILTINS_LOCATION,
11461 FIELD_DECL, get_identifier ("__fpr"),
11462 long_integer_type_node);
11463 f_ovf = build_decl (BUILTINS_LOCATION,
11464 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11465 ptr_type_node);
11466 f_sav = build_decl (BUILTINS_LOCATION,
11467 FIELD_DECL, get_identifier ("__reg_save_area"),
11468 ptr_type_node);
11470 va_list_gpr_counter_field = f_gpr;
11471 va_list_fpr_counter_field = f_fpr;
11473 DECL_FIELD_CONTEXT (f_gpr) = record;
11474 DECL_FIELD_CONTEXT (f_fpr) = record;
11475 DECL_FIELD_CONTEXT (f_ovf) = record;
11476 DECL_FIELD_CONTEXT (f_sav) = record;
11478 TYPE_STUB_DECL (record) = type_decl;
11479 TYPE_NAME (record) = type_decl;
11480 TYPE_FIELDS (record) = f_gpr;
11481 DECL_CHAIN (f_gpr) = f_fpr;
11482 DECL_CHAIN (f_fpr) = f_ovf;
11483 DECL_CHAIN (f_ovf) = f_sav;
11485 layout_type (record);
11487 /* The correct type is an array type of one element. */
11488 return build_array_type (record, build_index_type (size_zero_node));
11491 /* Implement va_start by filling the va_list structure VALIST.
11492 STDARG_P is always true, and ignored.
11493 NEXTARG points to the first anonymous stack argument.
11495 The following global variables are used to initialize
11496 the va_list structure:
11498 crtl->args.info:
11499 holds number of gprs and fprs used for named arguments.
11500 crtl->args.arg_offset_rtx:
11501 holds the offset of the first anonymous stack argument
11502 (relative to the virtual arg pointer). */
11504 static void
11505 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11507 HOST_WIDE_INT n_gpr, n_fpr;
11508 int off;
11509 tree f_gpr, f_fpr, f_ovf, f_sav;
11510 tree gpr, fpr, ovf, sav, t;
11512 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11513 f_fpr = DECL_CHAIN (f_gpr);
11514 f_ovf = DECL_CHAIN (f_fpr);
11515 f_sav = DECL_CHAIN (f_ovf);
11517 valist = build_simple_mem_ref (valist);
11518 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11519 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11520 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11521 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11523 /* Count number of gp and fp argument registers used. */
11525 n_gpr = crtl->args.info.gprs;
11526 n_fpr = crtl->args.info.fprs;
11528 if (cfun->va_list_gpr_size)
11530 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11531 build_int_cst (NULL_TREE, n_gpr));
11532 TREE_SIDE_EFFECTS (t) = 1;
11533 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11536 if (cfun->va_list_fpr_size)
11538 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11539 build_int_cst (NULL_TREE, n_fpr));
11540 TREE_SIDE_EFFECTS (t) = 1;
11541 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11544 /* Find the overflow area.
11545 FIXME: This currently is too pessimistic when the vector ABI is
11546 enabled. In that case we *always* set up the overflow area
11547 pointer. */
11548 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11549 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11550 || TARGET_VX_ABI)
11552 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11554 off = INTVAL (crtl->args.arg_offset_rtx);
11555 off = off < 0 ? 0 : off;
11556 if (TARGET_DEBUG_ARG)
11557 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11558 (int)n_gpr, (int)n_fpr, off);
11560 t = fold_build_pointer_plus_hwi (t, off);
11562 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11563 TREE_SIDE_EFFECTS (t) = 1;
11564 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11567 /* Find the register save area. */
11568 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
11569 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
11571 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11572 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
11574 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11575 TREE_SIDE_EFFECTS (t) = 1;
11576 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11580 /* Implement va_arg by updating the va_list structure
11581 VALIST as required to retrieve an argument of type
11582 TYPE, and returning that argument.
11584 Generates code equivalent to:
11586 if (integral value) {
11587 if (size <= 4 && args.gpr < 5 ||
11588 size > 4 && args.gpr < 4 )
11589 ret = args.reg_save_area[args.gpr+8]
11590 else
11591 ret = *args.overflow_arg_area++;
11592 } else if (vector value) {
11593 ret = *args.overflow_arg_area;
11594 args.overflow_arg_area += size / 8;
11595 } else if (float value) {
11596 if (args.fpr < 2)
11597 ret = args.reg_save_area[args.fpr+64]
11598 else
11599 ret = *args.overflow_arg_area++;
11600 } else if (aggregate value) {
11601 if (args.gpr < 5)
11602 ret = *args.reg_save_area[args.gpr]
11603 else
11604 ret = **args.overflow_arg_area++;
11605 } */
11607 static tree
11608 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11609 gimple_seq *post_p ATTRIBUTE_UNUSED)
11611 tree f_gpr, f_fpr, f_ovf, f_sav;
11612 tree gpr, fpr, ovf, sav, reg, t, u;
11613 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11614 tree lab_false, lab_over;
11615 tree addr = create_tmp_var (ptr_type_node, "addr");
11616 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
11617 a stack slot. */
11619 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11620 f_fpr = DECL_CHAIN (f_gpr);
11621 f_ovf = DECL_CHAIN (f_fpr);
11622 f_sav = DECL_CHAIN (f_ovf);
11624 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11625 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11626 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11628 /* The tree for args* cannot be shared between gpr/fpr and ovf since
11629 both appear on a lhs. */
11630 valist = unshare_expr (valist);
11631 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11633 size = int_size_in_bytes (type);
11635 s390_check_type_for_vector_abi (type, true, false);
11637 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11639 if (TARGET_DEBUG_ARG)
11641 fprintf (stderr, "va_arg: aggregate type");
11642 debug_tree (type);
11645 /* Aggregates are passed by reference. */
11646 indirect_p = 1;
11647 reg = gpr;
11648 n_reg = 1;
11650 /* Kernel stack layout on 31 bit: it is assumed here that no padding
11651 will be added by s390_frame_info because for va_args an even
11652 number of GPRs always has to be saved (r15-r2 = 14 regs). */
11653 sav_ofs = 2 * UNITS_PER_LONG;
11654 sav_scale = UNITS_PER_LONG;
11655 size = UNITS_PER_LONG;
11656 max_reg = GP_ARG_NUM_REG - n_reg;
11657 left_align_p = false;
11659 else if (s390_function_arg_vector (TYPE_MODE (type), type))
11661 if (TARGET_DEBUG_ARG)
11663 fprintf (stderr, "va_arg: vector type");
11664 debug_tree (type);
11667 indirect_p = 0;
11668 reg = NULL_TREE;
11669 n_reg = 0;
11670 sav_ofs = 0;
11671 sav_scale = 8;
11672 max_reg = 0;
11673 left_align_p = true;
11675 else if (s390_function_arg_float (TYPE_MODE (type), type))
11677 if (TARGET_DEBUG_ARG)
11679 fprintf (stderr, "va_arg: float type");
11680 debug_tree (type);
11683 /* FP args go in FP registers, if present. */
11684 indirect_p = 0;
11685 reg = fpr;
11686 n_reg = 1;
11687 sav_ofs = 16 * UNITS_PER_LONG;
11688 sav_scale = 8;
11689 max_reg = FP_ARG_NUM_REG - n_reg;
11690 left_align_p = false;
11692 else
11694 if (TARGET_DEBUG_ARG)
11696 fprintf (stderr, "va_arg: other type");
11697 debug_tree (type);
11700 /* Otherwise into GP registers. */
11701 indirect_p = 0;
11702 reg = gpr;
11703 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11705 /* Kernel stack layout on 31 bit: it is assumed here that no padding
11706 will be added by s390_frame_info because for va_args an even
11707 number of GPRs always has to be saved (r15-r2 = 14 regs). */
11708 sav_ofs = 2 * UNITS_PER_LONG;
11710 if (size < UNITS_PER_LONG)
11711 sav_ofs += UNITS_PER_LONG - size;
11713 sav_scale = UNITS_PER_LONG;
11714 max_reg = GP_ARG_NUM_REG - n_reg;
11715 left_align_p = false;
11718 /* Pull the value out of the saved registers ... */
11720 if (reg != NULL_TREE)
11722 /*
11723 if (reg > ((typeof (reg))max_reg))
11724 goto lab_false;
11726 addr = sav + sav_ofs + reg * sav_scale;
11728 goto lab_over;
11730 lab_false:
11731 */
11733 lab_false = create_artificial_label (UNKNOWN_LOCATION);
11734 lab_over = create_artificial_label (UNKNOWN_LOCATION);
11736 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
11737 t = build2 (GT_EXPR, boolean_type_node, reg, t);
11738 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11739 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11740 gimplify_and_add (t, pre_p);
11742 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11743 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
11744 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
11745 t = fold_build_pointer_plus (t, u);
11747 gimplify_assign (addr, t, pre_p);
11749 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11751 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
11754 /* ... Otherwise out of the overflow area. */
11756 t = ovf;
11757 if (size < UNITS_PER_LONG && !left_align_p)
11758 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
11760 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11762 gimplify_assign (addr, t, pre_p);
11764 if (size < UNITS_PER_LONG && left_align_p)
11765 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
11766 else
11767 t = fold_build_pointer_plus_hwi (t, size);
11769 gimplify_assign (ovf, t, pre_p);
11771 if (reg != NULL_TREE)
11772 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
11775 /* Increment register save count. */
11777 if (n_reg > 0)
11779 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
11780 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
11781 gimplify_and_add (u, pre_p);
11784 if (indirect_p)
11786 t = build_pointer_type_for_mode (build_pointer_type (type),
11787 ptr_mode, true);
11788 addr = fold_convert (t, addr);
11789 addr = build_va_arg_indirect_ref (addr);
11791 else
11793 t = build_pointer_type_for_mode (type, ptr_mode, true);
11794 addr = fold_convert (t, addr);
11797 return build_va_arg_indirect_ref (addr);
11800 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
11801 expanders.
11802 DEST - Register location where CC will be stored.
11803 TDB - Pointer to a 256 byte area where to store the transaction
11804 diagnostic block. NULL if TDB is not needed.
11805 RETRY - Retry count value. If non-NULL a retry loop for CC2
11806 is emitted.
11807 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
11808 of the tbegin instruction pattern. */
11810 void
11811 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
11813 rtx retry_plus_two = gen_reg_rtx (SImode);
11814 rtx retry_reg = gen_reg_rtx (SImode);
11815 rtx_code_label *retry_label = NULL;
11817 if (retry != NULL_RTX)
11819 emit_move_insn (retry_reg, retry);
11820 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
11821 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
11822 retry_label = gen_label_rtx ();
11823 emit_label (retry_label);
11826 if (clobber_fprs_p)
11828 if (TARGET_VX)
11829 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11830 tdb));
11831 else
11832 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11833 tdb));
11835 else
11836 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11837 tdb));
11839 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
11840 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
11841 CC_REGNUM)),
11842 UNSPEC_CC_TO_INT));
11843 if (retry != NULL_RTX)
11845 const int CC0 = 1 << 3;
11846 const int CC1 = 1 << 2;
11847 const int CC3 = 1 << 0;
11848 rtx jump;
11849 rtx count = gen_reg_rtx (SImode);
11850 rtx_code_label *leave_label = gen_label_rtx ();
11852 /* Exit for success and permanent failures. */
11853 jump = s390_emit_jump (leave_label,
11854 gen_rtx_EQ (VOIDmode,
11855 gen_rtx_REG (CCRAWmode, CC_REGNUM),
11856 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
11857 LABEL_NUSES (leave_label) = 1;
11859 /* CC2 - transient failure. Perform retry with ppa. */
11860 emit_move_insn (count, retry_plus_two);
11861 emit_insn (gen_subsi3 (count, count, retry_reg));
11862 emit_insn (gen_tx_assist (count));
11863 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
11864 retry_reg,
11865 retry_reg));
11866 JUMP_LABEL (jump) = retry_label;
11867 LABEL_NUSES (retry_label) = 1;
11868 emit_label (leave_label);
11873 /* Return the decl for the target specific builtin with the function
11874 code FCODE. */
11876 static tree
11877 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
11879 if (fcode >= S390_BUILTIN_MAX)
11880 return error_mark_node;
11882 return s390_builtin_decls[fcode];
11885 /* We call mcount before the function prologue. So a profiled leaf
11886 function should stay a leaf function. */
11888 static bool
11889 s390_keep_leaf_when_profiled ()
11891 return true;
11894 /* Output assembly code for the trampoline template to
11895 stdio stream FILE.
11897 On S/390, we use gpr 1 internally in the trampoline code;
11898 gpr 0 is used to hold the static chain. */
11900 static void
11901 s390_asm_trampoline_template (FILE *file)
11903 rtx op[2];
11904 op[0] = gen_rtx_REG (Pmode, 0);
11905 op[1] = gen_rtx_REG (Pmode, 1);
11907 if (TARGET_64BIT)
11909 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11910 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
11911 output_asm_insn ("br\t%1", op); /* 2 byte */
11912 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
11914 else
11916 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11917 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
11918 output_asm_insn ("br\t%1", op); /* 2 byte */
11919 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
11923 /* Emit RTL insns to initialize the variable parts of a trampoline.
11924 FNADDR is an RTX for the address of the function's pure code.
11925 CXT is an RTX for the static chain value for the function. */
11927 static void
11928 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
11930 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
11931 rtx mem;
11933 emit_block_move (m_tramp, assemble_trampoline_template (),
11934 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
11936 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
11937 emit_move_insn (mem, cxt);
11938 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
11939 emit_move_insn (mem, fnaddr);
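/* Sketch of the resulting trampoline on 64-bit targets, derived from the
   template and the stores above (offsets assume UNITS_PER_LONG == 8):
     0: basr %r1,0              r1 := address of the next insn
     2: lmg  %r0,%r1,14(%r1)    load the words at offsets 16 and 24
     8: br   %r1                branch to the target function
    16: static chain value (CXT)
    24: target function address (FNADDR)
   so at entry %r0 holds the static chain and %r1 the function address. */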
11942 /* Output assembler code to FILE to increment profiler label # LABELNO
11943 for profiling a function entry. */
11945 void
11946 s390_function_profiler (FILE *file, int labelno)
11948 rtx op[7];
11950 char label[128];
11951 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
11953 fprintf (file, "# function profiler \n");
11955 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
11956 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
11957 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
11959 op[2] = gen_rtx_REG (Pmode, 1);
11960 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
11961 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
11963 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
11964 if (flag_pic)
11966 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
11967 op[4] = gen_rtx_CONST (Pmode, op[4]);
11970 if (TARGET_64BIT)
11972 output_asm_insn ("stg\t%0,%1", op);
11973 output_asm_insn ("larl\t%2,%3", op);
11974 output_asm_insn ("brasl\t%0,%4", op);
11975 output_asm_insn ("lg\t%0,%1", op);
11977 else if (!flag_pic)
11979 op[6] = gen_label_rtx ();
11981 output_asm_insn ("st\t%0,%1", op);
11982 output_asm_insn ("bras\t%2,%l6", op);
11983 output_asm_insn (".long\t%4", op);
11984 output_asm_insn (".long\t%3", op);
11985 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11986 output_asm_insn ("l\t%0,0(%2)", op);
11987 output_asm_insn ("l\t%2,4(%2)", op);
11988 output_asm_insn ("basr\t%0,%0", op);
11989 output_asm_insn ("l\t%0,%1", op);
11991 else
11993 op[5] = gen_label_rtx ();
11994 op[6] = gen_label_rtx ();
11996 output_asm_insn ("st\t%0,%1", op);
11997 output_asm_insn ("bras\t%2,%l6", op);
11998 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
11999 output_asm_insn (".long\t%4-%l5", op);
12000 output_asm_insn (".long\t%3-%l5", op);
12001 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12002 output_asm_insn ("lr\t%0,%2", op);
12003 output_asm_insn ("a\t%0,0(%2)", op);
12004 output_asm_insn ("a\t%2,4(%2)", op);
12005 output_asm_insn ("basr\t%0,%0", op);
12006 output_asm_insn ("l\t%0,%1", op);
12010 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12011 into its SYMBOL_REF_FLAGS. */
12013 static void
12014 s390_encode_section_info (tree decl, rtx rtl, int first)
12016 default_encode_section_info (decl, rtl, first);
12018 if (TREE_CODE (decl) == VAR_DECL)
12020 /* Store the alignment to be able to check if we can use
12021 a larl/load-relative instruction. We only handle the cases
12022 that can go wrong (i.e. no FUNC_DECLs). If a symref does
12023 not have any flag we assume it to be correctly aligned. */
12025 if (DECL_ALIGN (decl) % 64)
12026 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12028 if (DECL_ALIGN (decl) % 32)
12029 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12031 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12032 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12035 /* Literal pool references don't have a decl so they are handled
12036 differently here. We rely on the information in the MEM_ALIGN
12037 entry to decide upon the alignment. */
12038 if (MEM_P (rtl)
12039 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12040 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
12041 && MEM_ALIGN (rtl) != 0
12042 && GET_MODE_BITSIZE (GET_MODE (rtl)) != 0)
12044 if (MEM_ALIGN (rtl) % 64)
12045 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12047 if (MEM_ALIGN (rtl) % 32)
12048 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12050 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12051 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12055 /* Output thunk to FILE that implements a C++ virtual function call (with
12056 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12057 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12058 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12059 relative to the resulting this pointer. */
12061 static void
12062 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12063 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12064 tree function)
12066 rtx op[10];
12067 int nonlocal = 0;
12069 /* Make sure unwind info is emitted for the thunk if needed. */
12070 final_start_function (emit_barrier (), file, 1);
12072 /* Operand 0 is the target function. */
12073 op[0] = XEXP (DECL_RTL (function), 0);
12074 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12076 nonlocal = 1;
12077 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12078 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12079 op[0] = gen_rtx_CONST (Pmode, op[0]);
12082 /* Operand 1 is the 'this' pointer. */
12083 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12084 op[1] = gen_rtx_REG (Pmode, 3);
12085 else
12086 op[1] = gen_rtx_REG (Pmode, 2);
12088 /* Operand 2 is the delta. */
12089 op[2] = GEN_INT (delta);
12091 /* Operand 3 is the vcall_offset. */
12092 op[3] = GEN_INT (vcall_offset);
12094 /* Operand 4 is the temporary register. */
12095 op[4] = gen_rtx_REG (Pmode, 1);
12097 /* Operands 5 to 8 can be used as labels. */
12098 op[5] = NULL_RTX;
12099 op[6] = NULL_RTX;
12100 op[7] = NULL_RTX;
12101 op[8] = NULL_RTX;
12103 /* Operand 9 can be used for temporary register. */
12104 op[9] = NULL_RTX;
12106 /* Generate code. */
12107 if (TARGET_64BIT)
12109 /* Setup literal pool pointer if required. */
12110 if ((!DISP_IN_RANGE (delta)
12111 && !CONST_OK_FOR_K (delta)
12112 && !CONST_OK_FOR_Os (delta))
12113 || (!DISP_IN_RANGE (vcall_offset)
12114 && !CONST_OK_FOR_K (vcall_offset)
12115 && !CONST_OK_FOR_Os (vcall_offset)))
12117 op[5] = gen_label_rtx ();
12118 output_asm_insn ("larl\t%4,%5", op);
12121 /* Add DELTA to this pointer. */
12122 if (delta)
12124 if (CONST_OK_FOR_J (delta))
12125 output_asm_insn ("la\t%1,%2(%1)", op);
12126 else if (DISP_IN_RANGE (delta))
12127 output_asm_insn ("lay\t%1,%2(%1)", op);
12128 else if (CONST_OK_FOR_K (delta))
12129 output_asm_insn ("aghi\t%1,%2", op);
12130 else if (CONST_OK_FOR_Os (delta))
12131 output_asm_insn ("agfi\t%1,%2", op);
12132 else
12134 op[6] = gen_label_rtx ();
12135 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12139 /* Perform vcall adjustment. */
12140 if (vcall_offset)
12142 if (DISP_IN_RANGE (vcall_offset))
12144 output_asm_insn ("lg\t%4,0(%1)", op);
12145 output_asm_insn ("ag\t%1,%3(%4)", op);
12147 else if (CONST_OK_FOR_K (vcall_offset))
12149 output_asm_insn ("lghi\t%4,%3", op);
12150 output_asm_insn ("ag\t%4,0(%1)", op);
12151 output_asm_insn ("ag\t%1,0(%4)", op);
12153 else if (CONST_OK_FOR_Os (vcall_offset))
12155 output_asm_insn ("lgfi\t%4,%3", op);
12156 output_asm_insn ("ag\t%4,0(%1)", op);
12157 output_asm_insn ("ag\t%1,0(%4)", op);
12159 else
12161 op[7] = gen_label_rtx ();
12162 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12163 output_asm_insn ("ag\t%4,0(%1)", op);
12164 output_asm_insn ("ag\t%1,0(%4)", op);
12168 /* Jump to target. */
12169 output_asm_insn ("jg\t%0", op);
12171 /* Output literal pool if required. */
12172 if (op[5])
12174 output_asm_insn (".align\t4", op);
12175 targetm.asm_out.internal_label (file, "L",
12176 CODE_LABEL_NUMBER (op[5]));
12178 if (op[6])
12180 targetm.asm_out.internal_label (file, "L",
12181 CODE_LABEL_NUMBER (op[6]));
12182 output_asm_insn (".long\t%2", op);
12184 if (op[7])
12186 targetm.asm_out.internal_label (file, "L",
12187 CODE_LABEL_NUMBER (op[7]));
12188 output_asm_insn (".long\t%3", op);
12191 else
12193 /* Setup base pointer if required. */
12194 if (!vcall_offset
12195 || (!DISP_IN_RANGE (delta)
12196 && !CONST_OK_FOR_K (delta)
12197 && !CONST_OK_FOR_Os (delta))
12198 || (!DISP_IN_RANGE (delta)
12199 && !CONST_OK_FOR_K (vcall_offset)
12200 && !CONST_OK_FOR_Os (vcall_offset)))
12202 op[5] = gen_label_rtx ();
12203 output_asm_insn ("basr\t%4,0", op);
12204 targetm.asm_out.internal_label (file, "L",
12205 CODE_LABEL_NUMBER (op[5]));
12208 /* Add DELTA to this pointer. */
12209 if (delta)
12211 if (CONST_OK_FOR_J (delta))
12212 output_asm_insn ("la\t%1,%2(%1)", op);
12213 else if (DISP_IN_RANGE (delta))
12214 output_asm_insn ("lay\t%1,%2(%1)", op);
12215 else if (CONST_OK_FOR_K (delta))
12216 output_asm_insn ("ahi\t%1,%2", op);
12217 else if (CONST_OK_FOR_Os (delta))
12218 output_asm_insn ("afi\t%1,%2", op);
12219 else
12221 op[6] = gen_label_rtx ();
12222 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12226 /* Perform vcall adjustment. */
12227 if (vcall_offset)
12229 if (CONST_OK_FOR_J (vcall_offset))
12231 output_asm_insn ("l\t%4,0(%1)", op);
12232 output_asm_insn ("a\t%1,%3(%4)", op);
12234 else if (DISP_IN_RANGE (vcall_offset))
12236 output_asm_insn ("l\t%4,0(%1)", op);
12237 output_asm_insn ("ay\t%1,%3(%4)", op);
12239 else if (CONST_OK_FOR_K (vcall_offset))
12241 output_asm_insn ("lhi\t%4,%3", op);
12242 output_asm_insn ("a\t%4,0(%1)", op);
12243 output_asm_insn ("a\t%1,0(%4)", op);
12245 else if (CONST_OK_FOR_Os (vcall_offset))
12247 output_asm_insn ("iilf\t%4,%3", op);
12248 output_asm_insn ("a\t%4,0(%1)", op);
12249 output_asm_insn ("a\t%1,0(%4)", op);
12251 else
12253 op[7] = gen_label_rtx ();
12254 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12255 output_asm_insn ("a\t%4,0(%1)", op);
12256 output_asm_insn ("a\t%1,0(%4)", op);
12259 /* We had to clobber the base pointer register.
12260 Re-setup the base pointer (with a different base). */
12261 op[5] = gen_label_rtx ();
12262 output_asm_insn ("basr\t%4,0", op);
12263 targetm.asm_out.internal_label (file, "L",
12264 CODE_LABEL_NUMBER (op[5]));
12267 /* Jump to target. */
12268 op[8] = gen_label_rtx ();
12270 if (!flag_pic)
12271 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12272 else if (!nonlocal)
12273 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12274 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12275 else if (flag_pic == 1)
12277 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12278 output_asm_insn ("l\t%4,%0(%4)", op);
12280 else if (flag_pic == 2)
12282 op[9] = gen_rtx_REG (Pmode, 0);
12283 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12284 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12285 output_asm_insn ("ar\t%4,%9", op);
12286 output_asm_insn ("l\t%4,0(%4)", op);
12289 output_asm_insn ("br\t%4", op);
12291 /* Output literal pool. */
12292 output_asm_insn (".align\t4", op);
12294 if (nonlocal && flag_pic == 2)
12295 output_asm_insn (".long\t%0", op);
12296 if (nonlocal)
12298 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12299 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12302 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12303 if (!flag_pic)
12304 output_asm_insn (".long\t%0", op);
12305 else
12306 output_asm_insn (".long\t%0-%5", op);
12308 if (op[6])
12310 targetm.asm_out.internal_label (file, "L",
12311 CODE_LABEL_NUMBER (op[6]));
12312 output_asm_insn (".long\t%2", op);
12314 if (op[7])
12316 targetm.asm_out.internal_label (file, "L",
12317 CODE_LABEL_NUMBER (op[7]));
12318 output_asm_insn (".long\t%3", op);
12321 final_end_function ();
12324 static bool
12325 s390_valid_pointer_mode (machine_mode mode)
12327 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12330 /* Checks whether the given CALL_EXPR would use a caller
12331 saved register. This is used to decide whether sibling call
12332 optimization could be performed on the respective function
12333 call. */
12335 static bool
12336 s390_call_saved_register_used (tree call_expr)
12338 CUMULATIVE_ARGS cum_v;
12339 cumulative_args_t cum;
12340 tree parameter;
12341 machine_mode mode;
12342 tree type;
12343 rtx parm_rtx;
12344 int reg, i;
12346 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12347 cum = pack_cumulative_args (&cum_v);
12349 for (i = 0; i < call_expr_nargs (call_expr); i++)
12351 parameter = CALL_EXPR_ARG (call_expr, i);
12352 gcc_assert (parameter);
12354 /* For an undeclared variable passed as parameter we will get
12355 an ERROR_MARK node here. */
12356 if (TREE_CODE (parameter) == ERROR_MARK)
12357 return true;
12359 type = TREE_TYPE (parameter);
12360 gcc_assert (type);
12362 mode = TYPE_MODE (type);
12363 gcc_assert (mode);
12365 /* We assume that in the target function all parameters are
12366 named. This only has an impact on vector argument register
12367 usage, none of which is call-saved. */
12368 if (pass_by_reference (&cum_v, mode, type, true))
12370 mode = Pmode;
12371 type = build_pointer_type (type);
12374 parm_rtx = s390_function_arg (cum, mode, type, true);
12376 s390_function_arg_advance (cum, mode, type, true);
12378 if (!parm_rtx)
12379 continue;
12381 if (REG_P (parm_rtx))
12383 for (reg = 0;
12384 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12385 reg++)
12386 if (!call_used_regs[reg + REGNO (parm_rtx)])
12387 return true;
12390 if (GET_CODE (parm_rtx) == PARALLEL)
12392 int i;
12394 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12396 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12398 gcc_assert (REG_P (r));
12400 for (reg = 0;
12401 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12402 reg++)
12403 if (!call_used_regs[reg + REGNO (r)])
12404 return true;
12409 return false;
12412 /* Return true if the given call expression can be
12413 turned into a sibling call.
12414 DECL holds the declaration of the function to be called whereas
12415 EXP is the call expression itself. */
12417 static bool
12418 s390_function_ok_for_sibcall (tree decl, tree exp)
12420 /* The TPF epilogue uses register 1. */
12421 if (TARGET_TPF_PROFILING)
12422 return false;
12424 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12425 which would have to be restored before the sibcall. */
12426 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12427 return false;
12429 /* Register 6 on s390 is available as an argument register but unfortunately
12430 "caller saved". This makes functions needing this register for arguments
12431 not suitable for sibcalls. */
12432 return !s390_call_saved_register_used (exp);
12435 /* Return the fixed registers used for condition codes. */
12437 static bool
12438 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12440 *p1 = CC_REGNUM;
12441 *p2 = INVALID_REGNUM;
12443 return true;
12446 /* This function is used by the call expanders of the machine description.
12447 It emits the call insn itself together with the necessary operations
12448 to adjust the target address and returns the emitted insn.
12449 ADDR_LOCATION is the target address rtx
12450 TLS_CALL the location of the thread-local symbol
12451 RESULT_REG the register where the result of the call should be stored
12452 RETADDR_REG the register where the return address should be stored
12453 If this parameter is NULL_RTX the call is considered
12454 to be a sibling call. */
12456 rtx_insn *
12457 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12458 rtx retaddr_reg)
12460 bool plt_call = false;
12461 rtx_insn *insn;
12462 rtx call;
12463 rtx clobber;
12464 rtvec vec;
12466 /* Direct function calls need special treatment. */
12467 if (GET_CODE (addr_location) == SYMBOL_REF)
12469 /* When calling a global routine in PIC mode, we must
12470 replace the symbol itself with the PLT stub. */
12471 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12473 if (retaddr_reg != NULL_RTX)
12475 addr_location = gen_rtx_UNSPEC (Pmode,
12476 gen_rtvec (1, addr_location),
12477 UNSPEC_PLT);
12478 addr_location = gen_rtx_CONST (Pmode, addr_location);
12479 plt_call = true;
12481 else
12482 /* For -fpic code the PLT entries might use r12 which is
12483 call-saved. Therefore we cannot do a sibcall when
12484 calling directly using a symbol ref. When reaching
12485 this point we decided (in s390_function_ok_for_sibcall)
12486 to do a sibcall for a function pointer but one of the
12487 optimizers was able to get rid of the function pointer
12488 by propagating the symbol ref into the call. This
12489 optimization is illegal for S/390 so we turn the direct
12490 call into an indirect call again. */
12491 addr_location = force_reg (Pmode, addr_location);
12494 /* Unless we can use the bras(l) insn, force the
12495 routine address into a register. */
12496 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12498 if (flag_pic)
12499 addr_location = legitimize_pic_address (addr_location, 0);
12500 else
12501 addr_location = force_reg (Pmode, addr_location);
12505 /* If it is already an indirect call or the code above moved the
12506 SYMBOL_REF to somewhere else make sure the address can be found in
12507 register 1. */
12508 if (retaddr_reg == NULL_RTX
12509 && GET_CODE (addr_location) != SYMBOL_REF
12510 && !plt_call)
12512 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12513 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
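/* The target address of a CALL rtx must be wrapped in a MEM.  */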
12516 addr_location = gen_rtx_MEM (QImode, addr_location);
12517 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12519 if (result_reg != NULL_RTX)
12520 call = gen_rtx_SET (result_reg, call);
12522 if (retaddr_reg != NULL_RTX)
12524 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12526 if (tls_call != NULL_RTX)
12527 vec = gen_rtvec (3, call, clobber,
12528 gen_rtx_USE (VOIDmode, tls_call));
12529 else
12530 vec = gen_rtvec (2, call, clobber);
12532 call = gen_rtx_PARALLEL (VOIDmode, vec);
12535 insn = emit_call_insn (call);
12537 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12538 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12540 /* s390_function_ok_for_sibcall should
12541 have denied sibcalls in this case. */
12542 gcc_assert (retaddr_reg != NULL_RTX);
12543 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12545 return insn;
12548 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12550 static void
12551 s390_conditional_register_usage (void)
12553 int i;
12555 if (flag_pic)
12557 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12558 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12560 if (TARGET_CPU_ZARCH)
12562 fixed_regs[BASE_REGNUM] = 0;
12563 call_used_regs[BASE_REGNUM] = 0;
12564 fixed_regs[RETURN_REGNUM] = 0;
12565 call_used_regs[RETURN_REGNUM] = 0;
12567 if (TARGET_64BIT)
12569 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12570 call_used_regs[i] = call_really_used_regs[i] = 0;
12572 else
12574 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12575 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12578 if (TARGET_SOFT_FLOAT)
12580 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12581 call_used_regs[i] = fixed_regs[i] = 1;
12584 /* Disable v16 - v31 for non-vector target. */
12585 if (!TARGET_VX)
12587 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12588 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12592 /* Corresponding function to eh_return expander. */
12594 static GTY(()) rtx s390_tpf_eh_return_symbol;
12595 void
12596 s390_emit_tpf_eh_return (rtx target)
12598 rtx_insn *insn;
12599 rtx reg, orig_ra;
12601 if (!s390_tpf_eh_return_symbol)
12602 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12604 reg = gen_rtx_REG (Pmode, 2);
12605 orig_ra = gen_rtx_REG (Pmode, 3);
12607 emit_move_insn (reg, target);
12608 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12609 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12610 gen_rtx_REG (Pmode, RETURN_REGNUM));
12611 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12612 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12614 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
12617 /* Rework the prologue/epilogue to avoid saving/restoring
12618 registers unnecessarily. */
12620 static void
12621 s390_optimize_prologue (void)
12623 rtx_insn *insn, *new_insn, *next_insn;
12625 /* Do a final recompute of the frame-related data. */
12626 s390_optimize_register_info ();
12628 /* If all special registers are in fact used, there's nothing we
12629 can do, so no point in walking the insn list. */
12631 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
12632 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
12633 && (TARGET_CPU_ZARCH
12634 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
12635 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
12636 return;
12638 /* Search for prologue/epilogue insns and replace them. */
12640 for (insn = get_insns (); insn; insn = next_insn)
12642 int first, last, off;
12643 rtx set, base, offset;
12644 rtx pat;
12646 next_insn = NEXT_INSN (insn);
12648 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
12649 continue;
12651 pat = PATTERN (insn);
12653 /* Remove ldgr/lgdr instructions used for saving and restoring
12654 GPRs if possible. */
12655 if (TARGET_Z10
12656 && GET_CODE (pat) == SET
12657 && GET_MODE (SET_SRC (pat)) == DImode
12658 && REG_P (SET_SRC (pat))
12659 && REG_P (SET_DEST (pat)))
12661 int src_regno = REGNO (SET_SRC (pat));
12662 int dest_regno = REGNO (SET_DEST (pat));
12663 int gpr_regno;
12664 int fpr_regno;
12666 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
12667 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
12668 continue;
12670 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
12671 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
12673 /* GPR must be call-saved, FPR must be call-clobbered. */
12674 if (!call_really_used_regs[fpr_regno]
12675 || call_really_used_regs[gpr_regno])
12676 continue;
12678 /* It must not happen that what we once saved in an FPR now
12679 needs a stack slot. */
12680 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
12682 if (cfun_gpr_save_slot (gpr_regno) == 0)
12684 remove_insn (insn);
12685 continue;
12689 if (GET_CODE (pat) == PARALLEL
12690 && store_multiple_operation (pat, VOIDmode))
12692 set = XVECEXP (pat, 0, 0);
12693 first = REGNO (SET_SRC (set));
12694 last = first + XVECLEN (pat, 0) - 1;
12695 offset = const0_rtx;
12696 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12697 off = INTVAL (offset);
12699 if (GET_CODE (base) != REG || off < 0)
12700 continue;
12701 if (cfun_frame_layout.first_save_gpr != -1
12702 && (cfun_frame_layout.first_save_gpr < first
12703 || cfun_frame_layout.last_save_gpr > last))
12704 continue;
12705 if (REGNO (base) != STACK_POINTER_REGNUM
12706 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12707 continue;
12708 if (first > BASE_REGNUM || last < BASE_REGNUM)
12709 continue;
12711 if (cfun_frame_layout.first_save_gpr != -1)
12713 rtx s_pat = save_gprs (base,
12714 off + (cfun_frame_layout.first_save_gpr
12715 - first) * UNITS_PER_LONG,
12716 cfun_frame_layout.first_save_gpr,
12717 cfun_frame_layout.last_save_gpr);
12718 new_insn = emit_insn_before (s_pat, insn);
12719 INSN_ADDRESSES_NEW (new_insn, -1);
12722 remove_insn (insn);
12723 continue;
12726 if (cfun_frame_layout.first_save_gpr == -1
12727 && GET_CODE (pat) == SET
12728 && GENERAL_REG_P (SET_SRC (pat))
12729 && GET_CODE (SET_DEST (pat)) == MEM)
12731 set = pat;
12732 first = REGNO (SET_SRC (set));
12733 offset = const0_rtx;
12734 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12735 off = INTVAL (offset);
12737 if (GET_CODE (base) != REG || off < 0)
12738 continue;
12739 if (REGNO (base) != STACK_POINTER_REGNUM
12740 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12741 continue;
12743 remove_insn (insn);
12744 continue;
12747 if (GET_CODE (pat) == PARALLEL
12748 && load_multiple_operation (pat, VOIDmode))
12750 set = XVECEXP (pat, 0, 0);
12751 first = REGNO (SET_DEST (set));
12752 last = first + XVECLEN (pat, 0) - 1;
12753 offset = const0_rtx;
12754 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12755 off = INTVAL (offset);
12757 if (GET_CODE (base) != REG || off < 0)
12758 continue;
12760 if (cfun_frame_layout.first_restore_gpr != -1
12761 && (cfun_frame_layout.first_restore_gpr < first
12762 || cfun_frame_layout.last_restore_gpr > last))
12763 continue;
12764 if (REGNO (base) != STACK_POINTER_REGNUM
12765 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12766 continue;
12767 if (first > BASE_REGNUM || last < BASE_REGNUM)
12768 continue;
12770 if (cfun_frame_layout.first_restore_gpr != -1)
12772 rtx rpat = restore_gprs (base,
12773 off + (cfun_frame_layout.first_restore_gpr
12774 - first) * UNITS_PER_LONG,
12775 cfun_frame_layout.first_restore_gpr,
12776 cfun_frame_layout.last_restore_gpr);
12778 /* Remove REG_CFA_RESTOREs for registers that we no
12779 longer need to save. */
12780 REG_NOTES (rpat) = REG_NOTES (insn);
12781 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
12782 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
12783 && ((int) REGNO (XEXP (*ptr, 0))
12784 < cfun_frame_layout.first_restore_gpr))
12785 *ptr = XEXP (*ptr, 1);
12786 else
12787 ptr = &XEXP (*ptr, 1);
12788 new_insn = emit_insn_before (rpat, insn);
12789 RTX_FRAME_RELATED_P (new_insn) = 1;
12790 INSN_ADDRESSES_NEW (new_insn, -1);
12793 remove_insn (insn);
12794 continue;
12797 if (cfun_frame_layout.first_restore_gpr == -1
12798 && GET_CODE (pat) == SET
12799 && GENERAL_REG_P (SET_DEST (pat))
12800 && GET_CODE (SET_SRC (pat)) == MEM)
12802 set = pat;
12803 first = REGNO (SET_DEST (set));
12804 offset = const0_rtx;
12805 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12806 off = INTVAL (offset);
12808 if (GET_CODE (base) != REG || off < 0)
12809 continue;
12811 if (REGNO (base) != STACK_POINTER_REGNUM
12812 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12813 continue;
12815 remove_insn (insn);
12816 continue;
12821 /* On z10 and later the dynamic branch prediction must see the
12822 backward jump within a certain window. If not, it falls back to
12823 the static prediction. This function rearranges the loop backward
12824 branch in a way which makes the static prediction always correct.
12825 The function returns true if it added an instruction. */
12826 static bool
12827 s390_fix_long_loop_prediction (rtx_insn *insn)
12829 rtx set = single_set (insn);
12830 rtx code_label, label_ref, new_label;
12831 rtx_insn *uncond_jump;
12832 rtx_insn *cur_insn;
12833 rtx tmp;
12834 int distance;
12836 /* This will exclude branch on count and branch on index patterns
12837 since these are correctly statically predicted. */
12838 if (!set
12839 || SET_DEST (set) != pc_rtx
12840 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
12841 return false;
12843 /* Skip conditional returns. */
12844 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
12845 && XEXP (SET_SRC (set), 2) == pc_rtx)
12846 return false;
12848 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
12849 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
12851 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
12853 code_label = XEXP (label_ref, 0);
12855 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
12856 || INSN_ADDRESSES (INSN_UID (insn)) == -1
12857 || (INSN_ADDRESSES (INSN_UID (insn))
12858 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
12859 return false;
12861 for (distance = 0, cur_insn = PREV_INSN (insn);
12862 distance < PREDICT_DISTANCE - 6;
12863 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
12864 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
12865 return false;
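/* Emit an unconditional backward jump to the original target right
   after INSN, place a new label behind that jump, and invert INSN so
   that it branches forward to the new label instead.  The backward
   branch is now unconditional, so the static prediction is always
   correct.  */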
12867 new_label = gen_label_rtx ();
12868 uncond_jump = emit_jump_insn_after (
12869 gen_rtx_SET (pc_rtx,
12870 gen_rtx_LABEL_REF (VOIDmode, code_label)),
12871 insn);
12872 emit_label_after (new_label, uncond_jump);
12874 tmp = XEXP (SET_SRC (set), 1);
12875 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
12876 XEXP (SET_SRC (set), 2) = tmp;
12877 INSN_CODE (insn) = -1;
12879 XEXP (label_ref, 0) = new_label;
12880 JUMP_LABEL (insn) = new_label;
12881 JUMP_LABEL (uncond_jump) = code_label;
12883 return true;
12886 /* Returns 1 if INSN reads the value of REG for purposes not related
12887 to addressing of memory, and 0 otherwise. */
12888 static int
12889 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
12891 return reg_referenced_p (reg, PATTERN (insn))
12892 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
12895 /* Starting from INSN find_cond_jump looks downwards in the insn
12896 stream for a single jump insn which is the last user of the
12897 condition code set in INSN. */
12898 static rtx_insn *
12899 find_cond_jump (rtx_insn *insn)
12901 for (; insn; insn = NEXT_INSN (insn))
12903 rtx ite, cc;
12905 if (LABEL_P (insn))
12906 break;
12908 if (!JUMP_P (insn))
12910 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
12911 break;
12912 continue;
12915 /* This will be triggered by a return. */
12916 if (GET_CODE (PATTERN (insn)) != SET)
12917 break;
12919 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
12920 ite = SET_SRC (PATTERN (insn));
12922 if (GET_CODE (ite) != IF_THEN_ELSE)
12923 break;
12925 cc = XEXP (XEXP (ite, 0), 0);
12926 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
12927 break;
12929 if (find_reg_note (insn, REG_DEAD, cc))
12930 return insn;
12931 break;
12934 return NULL;
12937 /* Swap the condition in COND and the operands in OP0 and OP1 so that
12938 the semantics does not change. If NULL_RTX is passed as COND the
12939 function tries to find the conditional jump starting with INSN. */
12940 static void
12941 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
12943 rtx tmp = *op0;
12945 if (cond == NULL_RTX)
12947 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
12948 rtx set = jump ? single_set (jump) : NULL_RTX;
12950 if (set == NULL_RTX)
12951 return;
12953 cond = XEXP (SET_SRC (set), 0);
12956 *op0 = *op1;
12957 *op1 = tmp;
12958 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
12961 /* On z10, instructions of the compare-and-branch family have the
12962 property of accessing the register occurring as second operand with
12963 its bits complemented. If such a compare is grouped with a second
12964 instruction that accesses the same register non-complemented, and
12965 if that register's value is delivered via a bypass, then the
12966 pipeline recycles, thereby causing significant performance decline.
12967 This function locates such situations and exchanges the two
12968 operands of the compare. The function returns true whenever it
12969 added an insn. */
12970 static bool
12971 s390_z10_optimize_cmp (rtx_insn *insn)
12973 rtx_insn *prev_insn, *next_insn;
12974 bool insn_added_p = false;
12975 rtx cond, *op0, *op1;
12977 if (GET_CODE (PATTERN (insn)) == PARALLEL)
12979 /* Handle compare and branch and branch on count
12980 instructions. */
12981 rtx pattern = single_set (insn);
12983 if (!pattern
12984 || SET_DEST (pattern) != pc_rtx
12985 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
12986 return false;
12988 cond = XEXP (SET_SRC (pattern), 0);
12989 op0 = &XEXP (cond, 0);
12990 op1 = &XEXP (cond, 1);
12992 else if (GET_CODE (PATTERN (insn)) == SET)
12994 rtx src, dest;
12996 /* Handle normal compare instructions. */
12997 src = SET_SRC (PATTERN (insn));
12998 dest = SET_DEST (PATTERN (insn));
13000 if (!REG_P (dest)
13001 || !CC_REGNO_P (REGNO (dest))
13002 || GET_CODE (src) != COMPARE)
13003 return false;
13005 /* s390_swap_cmp will try to find the conditional
13006 jump when passing NULL_RTX as condition. */
13007 cond = NULL_RTX;
13008 op0 = &XEXP (src, 0);
13009 op1 = &XEXP (src, 1);
13011 else
13012 return false;
13014 if (!REG_P (*op0) || !REG_P (*op1))
13015 return false;
13017 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13018 return false;
13020 /* Swap the COMPARE arguments and its mask if there is a
13021 conflicting access in the previous insn. */
13022 prev_insn = prev_active_insn (insn);
13023 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13024 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13025 s390_swap_cmp (cond, op0, op1, insn);
13027 /* Check if there is a conflict with the next insn. If there
13028 was no conflict with the previous insn, then swap the
13029 COMPARE arguments and its mask. If we already swapped
13030 the operands, or if swapping them would cause a conflict
13031 with the previous insn, issue a NOP after the COMPARE in
13032 order to separate the two instructions. */
13033 next_insn = next_active_insn (insn);
13034 if (next_insn != NULL_RTX && INSN_P (next_insn)
13035 && s390_non_addr_reg_read_p (*op1, next_insn))
13037 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13038 && s390_non_addr_reg_read_p (*op0, prev_insn))
13040 if (REGNO (*op1) == 0)
13041 emit_insn_after (gen_nop1 (), insn);
13042 else
13043 emit_insn_after (gen_nop (), insn);
13044 insn_added_p = true;
13046 else
13047 s390_swap_cmp (cond, op0, op1, insn);
13049 return insn_added_p;
13052 /* Perform machine-dependent processing. */
13054 static void
13055 s390_reorg (void)
13057 bool pool_overflow = false;
13058 int hw_before, hw_after;
13060 /* Make sure all splits have been performed; splits after
13061 machine_dependent_reorg might confuse insn length counts. */
13062 split_all_insns_noflow ();
13064 /* Install the main literal pool and the associated base
13065 register load insns.
13067 In addition, there are two problematic situations we need
13068 to correct:
13070 - the literal pool might be > 4096 bytes in size, so that
13071 some of its elements cannot be directly accessed
13073 - a branch target might be > 64K away from the branch, so that
13074 it is not possible to use a PC-relative instruction.
13076 To fix those, we split the single literal pool into multiple
13077 pool chunks, reloading the pool base register at various
13078 points throughout the function to ensure it always points to
13079 the pool chunk the following code expects, and / or replace
13080 PC-relative branches by absolute branches.
13082 However, the two problems are interdependent: splitting the
13083 literal pool can move a branch further away from its target,
13084 causing the 64K limit to overflow, and on the other hand,
13085 replacing a PC-relative branch by an absolute branch means
13086 we need to put the branch target address into the literal
13087 pool, possibly causing it to overflow.
13089 So, we loop trying to fix up both problems until we manage
13090 to satisfy both conditions at the same time. Note that the
13091 loop is guaranteed to terminate as every pass of the loop
13092 strictly decreases the total number of PC-relative branches
13093 in the function. (This is not completely true as there
13094 might be branch-over-pool insns introduced by chunkify_start.
13095 Those never need to be split however.) */
13097 for (;;)
13099 struct constant_pool *pool = NULL;
13101 /* Collect the literal pool. */
13102 if (!pool_overflow)
13104 pool = s390_mainpool_start ();
13105 if (!pool)
13106 pool_overflow = true;
13109 /* If literal pool overflowed, start to chunkify it. */
13110 if (pool_overflow)
13111 pool = s390_chunkify_start ();
13113 /* Split out-of-range branches. If this has created new
13114 literal pool entries, cancel current chunk list and
13115 recompute it. zSeries machines have large branch
13116 instructions, so we never need to split a branch. */
13117 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13119 if (pool_overflow)
13120 s390_chunkify_cancel (pool);
13121 else
13122 s390_mainpool_cancel (pool);
13124 continue;
13127 /* If we made it up to here, both conditions are satisfied.
13128 Finish up literal pool related changes. */
13129 if (pool_overflow)
13130 s390_chunkify_finish (pool);
13131 else
13132 s390_mainpool_finish (pool);
13134 /* We're done splitting branches. */
13135 cfun->machine->split_branches_pending_p = false;
13136 break;
13139 /* Generate out-of-pool execute target insns. */
13140 if (TARGET_CPU_ZARCH)
13142 rtx_insn *insn, *target;
13143 rtx label;
13145 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13147 label = s390_execute_label (insn);
13148 if (!label)
13149 continue;
13151 gcc_assert (label != const0_rtx);
13153 target = emit_label (XEXP (label, 0));
13154 INSN_ADDRESSES_NEW (target, -1);
13156 target = emit_insn (s390_execute_target (insn));
13157 INSN_ADDRESSES_NEW (target, -1);
13161 /* Try to optimize prologue and epilogue further. */
13162 s390_optimize_prologue ();
13164 /* Walk over the insns and do some >=z10 specific changes. */
13165 if (s390_tune >= PROCESSOR_2097_Z10)
13167 rtx_insn *insn;
13168 bool insn_added_p = false;
13170 /* The insn lengths and addresses have to be up to date for the
13171 following manipulations. */
13172 shorten_branches (get_insns ());
13174 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13176 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13177 continue;
13179 if (JUMP_P (insn))
13180 insn_added_p |= s390_fix_long_loop_prediction (insn);
13182 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13183 || GET_CODE (PATTERN (insn)) == SET)
13184 && s390_tune == PROCESSOR_2097_Z10)
13185 insn_added_p |= s390_z10_optimize_cmp (insn);
13188 /* Adjust branches if we added new instructions. */
13189 if (insn_added_p)
13190 shorten_branches (get_insns ());
13193 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13194 if (hw_after > 0)
13196 rtx_insn *insn;
13198 /* Insert NOPs for hotpatching. */
13199 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13200 /* Emit NOPs
13201 1. inside the area covered by debug information to allow setting
13202 breakpoints at the NOPs,
13203 2. before any insn which results in an asm instruction,
13204 3. before in-function labels to avoid jumping to the NOPs, for
13205 example as part of a loop,
13206 4. before any barrier in case the function is completely empty
13207 (__builtin_unreachable ()) and has neither internal labels nor
13208 active insns.
13210 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13211 break;
13212 /* Output a series of NOPs before the first active insn. */
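/* hw_after counts halfwords (2-byte units): a 6-byte NOP consumes 3
   of them, a 4-byte NOP 2, and a 2-byte NOP 1.  */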
13213 while (insn && hw_after > 0)
13215 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13217 emit_insn_before (gen_nop_6_byte (), insn);
13218 hw_after -= 3;
13220 else if (hw_after >= 2)
13222 emit_insn_before (gen_nop_4_byte (), insn);
13223 hw_after -= 2;
13225 else
13227 emit_insn_before (gen_nop_2_byte (), insn);
13228 hw_after -= 1;
13234 /* Return true if INSN is a fp load insn writing register REGNO. */
13235 static inline bool
13236 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13238 rtx set;
13239 enum attr_type flag = s390_safe_attr_type (insn);
13241 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13242 return false;
13244 set = single_set (insn);
13246 if (set == NULL_RTX)
13247 return false;
13249 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13250 return false;
13252 if (REGNO (SET_DEST (set)) != regno)
13253 return false;
13255 return true;
13258 /* This value describes the distance to be avoided between an
13259 arithmetic fp instruction and an fp load writing the same register.
13260 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
13261 fine but the exact value has to be avoided. Otherwise the FP
13262 pipeline will throw an exception causing a major penalty. */
13263 #define Z10_EARLYLOAD_DISTANCE 7
13265 /* Rearrange the ready list in order to avoid the situation described
13266 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13267 moved to the very end of the ready list. */
13268 static void
13269 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13271 unsigned int regno;
13272 int nready = *nready_p;
13273 rtx_insn *tmp;
13274 int i;
13275 rtx_insn *insn;
13276 rtx set;
13277 enum attr_type flag;
13278 int distance;
13280 /* Skip DISTANCE - 1 active insns. */
13281 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13282 distance > 0 && insn != NULL_RTX;
13283 distance--, insn = prev_active_insn (insn))
13284 if (CALL_P (insn) || JUMP_P (insn))
13285 return;
13287 if (insn == NULL_RTX)
13288 return;
13290 set = single_set (insn);
13292 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13293 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13294 return;
13296 flag = s390_safe_attr_type (insn);
13298 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13299 return;
13301 regno = REGNO (SET_DEST (set));
13302 i = nready - 1;
13304 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13305 i--;
13307 if (!i)
13308 return;
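/* Move the problematic fp load to index 0.  The insn with the highest
   index is issued next, so this delays the load as long as possible.  */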
13310 tmp = ready[i];
13311 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13312 ready[0] = tmp;
13316 /* The s390_sched_state variable tracks the state of the current or
13317 the last instruction group.
13319 0,1,2 number of instructions scheduled in the current group
13320 3 the last group is complete - normal insns
13321 4 the last group was a cracked/expanded insn */
13323 static int s390_sched_state;
13325 #define S390_OOO_SCHED_STATE_NORMAL 3
13326 #define S390_OOO_SCHED_STATE_CRACKED 4
13328 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
13329 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
13330 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
13331 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
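/* Bit masks corresponding to the ooo_cracked, ooo_expanded,
   ooo_endgroup and ooo_groupalone insn attributes; they are collected
   per insn by s390_get_sched_attrmask below.  */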
13333 static unsigned int
13334 s390_get_sched_attrmask (rtx_insn *insn)
13336 unsigned int mask = 0;
13338 if (get_attr_ooo_cracked (insn))
13339 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
13340 if (get_attr_ooo_expanded (insn))
13341 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
13342 if (get_attr_ooo_endgroup (insn))
13343 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
13344 if (get_attr_ooo_groupalone (insn))
13345 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
13346 return mask;
13349 /* Return the scheduling score for INSN. The higher the score the
13350 better. The score is calculated from the OOO scheduling attributes
13351 of INSN and the scheduling state s390_sched_state. */
13352 static int
13353 s390_sched_score (rtx_insn *insn)
13355 unsigned int mask = s390_get_sched_attrmask (insn);
13356 int score = 0;
13358 switch (s390_sched_state)
13360 case 0:
13361 /* Try to put insns into the first slot which would otherwise
13362 break a group. */
13363 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13364 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13365 score += 5;
13366 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13367 score += 10;
13368 case 1:
13369 /* Prefer not cracked insns while trying to put together a
13370 group. */
13371 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13372 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13373 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13374 score += 10;
13375 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
13376 score += 5;
13377 break;
13378 case 2:
13379 /* Prefer not cracked insns while trying to put together a
13380 group. */
13381 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13382 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13383 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13384 score += 10;
13385 /* Prefer endgroup insns in the last slot. */
13386 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
13387 score += 10;
13388 break;
13389 case S390_OOO_SCHED_STATE_NORMAL:
13390 /* Prefer not cracked insns if the last was not cracked. */
13391 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13392 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
13393 score += 5;
13394 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13395 score += 10;
13396 break;
13397 case S390_OOO_SCHED_STATE_CRACKED:
13398 /* Try to keep cracked insns together to prevent them from
13399 interrupting groups. */
13400 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13401 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13402 score += 5;
13403 break;
13405 return score;
13408 /* This function is called via hook TARGET_SCHED_REORDER before
13409 issuing one insn from list READY which contains *NREADYP entries.
13410 For target z10 it reorders load instructions to avoid early load
13411 conflicts in the floating-point pipeline. */
13412 static int
13413 s390_sched_reorder (FILE *file, int verbose,
13414 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13416 if (s390_tune == PROCESSOR_2097_Z10
13417 && reload_completed
13418 && *nreadyp > 1)
13419 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13421 if (s390_tune >= PROCESSOR_2827_ZEC12
13422 && reload_completed
13423 && *nreadyp > 1)
13425 int i;
13426 int last_index = *nreadyp - 1;
13427 int max_index = -1;
13428 int max_score = -1;
13429 rtx_insn *tmp;
13431 /* Just move the insn with the highest score to the top (the
13432 end) of the list. A full sort is not needed since a conflict
13433 in the hazard recognition cannot happen. So the top insn in
13434 the ready list will always be taken. */
13435 for (i = last_index; i >= 0; i--)
13437 int score;
13439 if (recog_memoized (ready[i]) < 0)
13440 continue;
13442 score = s390_sched_score (ready[i]);
13443 if (score > max_score)
13445 max_score = score;
13446 max_index = i;
13450 if (max_index != -1)
13452 if (max_index != last_index)
13454 tmp = ready[max_index];
13455 ready[max_index] = ready[last_index];
13456 ready[last_index] = tmp;
13458 if (verbose > 5)
13459 fprintf (file,
13460 "move insn %d to the top of list\n",
13461 INSN_UID (ready[last_index]));
13463 else if (verbose > 5)
13464 fprintf (file,
13465 "best insn %d already on top\n",
13466 INSN_UID (ready[last_index]));
13469 if (verbose > 5)
13471 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13472 s390_sched_state);
13474 for (i = last_index; i >= 0; i--)
13476 if (recog_memoized (ready[i]) < 0)
13477 continue;
13478 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
13479 s390_sched_score (ready[i]));
13480 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
13481 PRINT_OOO_ATTR (ooo_cracked);
13482 PRINT_OOO_ATTR (ooo_expanded);
13483 PRINT_OOO_ATTR (ooo_endgroup);
13484 PRINT_OOO_ATTR (ooo_groupalone);
13485 #undef PRINT_OOO_ATTR
13486 fprintf (file, "\n");
13491 return s390_issue_rate ();
13495 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13496 the scheduler has issued INSN. It stores the last issued insn into
13497 last_scheduled_insn in order to make it available for
13498 s390_sched_reorder. */
13499 static int
13500 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13502 last_scheduled_insn = insn;
13504 if (s390_tune >= PROCESSOR_2827_ZEC12
13505 && reload_completed
13506 && recog_memoized (insn) >= 0)
13508 unsigned int mask = s390_get_sched_attrmask (insn);
13510 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13511 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13512 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
13513 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
13514 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13515 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13516 else
13518 /* Only normal insns are left (mask == 0). */
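/* A normal insn either extends the current group or, if the last
   group was complete, starts a new group counting this insn as its
   first member.  */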
13519 switch (s390_sched_state)
13521 case 0:
13522 case 1:
13523 case 2:
13524 case S390_OOO_SCHED_STATE_NORMAL:
13525 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
13526 s390_sched_state = 1;
13527 else
13528 s390_sched_state++;
13530 break;
13531 case S390_OOO_SCHED_STATE_CRACKED:
13532 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13533 break;
13536 if (verbose > 5)
13538 fprintf (file, "insn %d: ", INSN_UID (insn));
13539 #define PRINT_OOO_ATTR(ATTR) \
13540 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
13541 PRINT_OOO_ATTR (ooo_cracked);
13542 PRINT_OOO_ATTR (ooo_expanded);
13543 PRINT_OOO_ATTR (ooo_endgroup);
13544 PRINT_OOO_ATTR (ooo_groupalone);
13545 #undef PRINT_OOO_ATTR
13546 fprintf (file, "\n");
13547 fprintf (file, "sched state: %d\n", s390_sched_state);
13551 if (GET_CODE (PATTERN (insn)) != USE
13552 && GET_CODE (PATTERN (insn)) != CLOBBER)
13553 return more - 1;
13554 else
13555 return more;
13558 static void
13559 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
13560 int verbose ATTRIBUTE_UNUSED,
13561 int max_ready ATTRIBUTE_UNUSED)
13563 last_scheduled_insn = NULL;
13564 s390_sched_state = 0;
13567 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
13568 a new number of times struct loop *loop should be unrolled if tuned
13569 for cpus with a built-in stride prefetcher.
13570 The loop is analyzed for memory accesses by walking over each rtx of
13571 the loop body. Depending on the loop_depth and the amount of
13572 memory accesses a new number <=nunroll is returned to improve the
13573 behaviour of the hardware prefetch unit. */
13574 static unsigned
13575 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
13577 basic_block *bbs;
13578 rtx_insn *insn;
13579 unsigned i;
13580 unsigned mem_count = 0;
13582 if (s390_tune < PROCESSOR_2097_Z10)
13583 return nunroll;
13585 /* Count the number of memory references within the loop body. */
13586 bbs = get_loop_body (loop);
13587 subrtx_iterator::array_type array;
13588 for (i = 0; i < loop->num_nodes; i++)
13589 FOR_BB_INSNS (bbs[i], insn)
13590 if (INSN_P (insn) && INSN_CODE (insn) != -1)
13591 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
13592 if (MEM_P (*iter))
13593 mem_count += 1;
13594 free (bbs);
13596 /* Prevent division by zero; nunroll need not be adjusted in this case. */
13597 if (mem_count == 0)
13598 return nunroll;
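/* The divisors below cap the unroll factor so that the unrolled loop
   body contains at most roughly 28, 22 or 16 memory references,
   depending on the nesting depth of the loop.  */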
13600 switch (loop_depth(loop))
13602 case 1:
13603 return MIN (nunroll, 28 / mem_count);
13604 case 2:
13605 return MIN (nunroll, 22 / mem_count);
13606 default:
13607 return MIN (nunroll, 16 / mem_count);
13611 /* Restore the current options. This is a hook function and also called
13612 internally. */
13614 static void
13615 s390_function_specific_restore (struct gcc_options *opts,
13616 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
13618 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
13621 static void
13622 s390_option_override_internal (bool main_args_p,
13623 struct gcc_options *opts,
13624 const struct gcc_options *opts_set)
13626 const char *prefix;
13627 const char *suffix;
13629 /* Set up prefix/suffix so the error messages refer to either the command
13630 line argument or the attribute(target). */
13631 if (main_args_p)
13633 prefix = "-m";
13634 suffix = "";
13636 else
13638 prefix = "option(\"";
13639 suffix = "\")";
13643 /* Architecture mode defaults according to ABI. */
13644 if (!(opts_set->x_target_flags & MASK_ZARCH))
13646 if (TARGET_64BIT)
13647 opts->x_target_flags |= MASK_ZARCH;
13648 else
13649 opts->x_target_flags &= ~MASK_ZARCH;
13652 /* Set the march default in case it hasn't been specified on cmdline. */
13653 if (!opts_set->x_s390_arch)
13654 opts->x_s390_arch = PROCESSOR_2064_Z900;
13655 else if (opts->x_s390_arch == PROCESSOR_9672_G5
13656 || opts->x_s390_arch == PROCESSOR_9672_G6)
13657 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
13658 "in future releases; use at least %sarch=z900%s",
13659 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
13660 suffix, prefix, suffix);
13662 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
13664 /* Determine processor to tune for. */
13665 if (!opts_set->x_s390_tune)
13666 opts->x_s390_tune = opts->x_s390_arch;
13667 else if (opts->x_s390_tune == PROCESSOR_9672_G5
13668 || opts->x_s390_tune == PROCESSOR_9672_G6)
13669 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
13670 "in future releases; use at least %stune=z900%s",
13671 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
13672 suffix, prefix, suffix);
13674 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
13676 /* Sanity checks. */
13677 if (opts->x_s390_arch == PROCESSOR_NATIVE
13678 || opts->x_s390_tune == PROCESSOR_NATIVE)
13679 gcc_unreachable ();
13680 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
13681 error ("z/Architecture mode not supported on %s",
13682 processor_table[(int)opts->x_s390_arch].name);
13683 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
13684 error ("64-bit ABI not supported in ESA/390 mode");
13686 /* Enable hardware transactions if available and not explicitly
13687 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
13688 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
13690 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
13691 opts->x_target_flags |= MASK_OPT_HTM;
13692 else
13693 opts->x_target_flags &= ~MASK_OPT_HTM;
13696 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
13698 if (TARGET_OPT_VX_P (opts->x_target_flags))
13700 if (!TARGET_CPU_VX_P (opts))
13701 error ("hardware vector support not available on %s",
13702 processor_table[(int)opts->x_s390_arch].name);
13703 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
13704 error ("hardware vector support not available with -msoft-float");
13707 else
13709 if (TARGET_CPU_VX_P (opts))
13710 /* Enable vector support if available and not explicitly disabled
13711 by user. E.g. with -m31 -march=z13 -mzarch */
13712 opts->x_target_flags |= MASK_OPT_VX;
13713 else
13714 opts->x_target_flags &= ~MASK_OPT_VX;
13717 /* Use hardware DFP if available and not explicitly disabled by
13718 user. E.g. with -m31 -march=z10 -mzarch */
13719 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
13721 if (TARGET_DFP_P (opts))
13722 opts->x_target_flags |= MASK_HARD_DFP;
13723 else
13724 opts->x_target_flags &= ~MASK_HARD_DFP;
13727 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
13729 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
13731 if (!TARGET_CPU_DFP_P (opts))
13732 error ("hardware decimal floating point instructions"
13733 " not available on %s",
13734 processor_table[(int)opts->x_s390_arch].name);
13735 if (!TARGET_ZARCH_P (opts->x_target_flags))
13736 error ("hardware decimal floating point instructions"
13737 " not available in ESA/390 mode");
13739 else
13740 opts->x_target_flags &= ~MASK_HARD_DFP;
13743 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
13744 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
13746 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
13747 && TARGET_HARD_DFP_P (opts->x_target_flags))
13748 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
13750 opts->x_target_flags &= ~MASK_HARD_DFP;
13753 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
13754 && TARGET_PACKED_STACK_P (opts->x_target_flags)
13755 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
13756 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
13757 "in combination");
13759 if (opts->x_s390_stack_size)
13761 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
13762 error ("stack size must be greater than the stack guard value");
13763 else if (opts->x_s390_stack_size > 1 << 16)
13764 error ("stack size must not be greater than 64k");
13766 else if (opts->x_s390_stack_guard)
13767 error ("-mstack-guard implies use of -mstack-size");
13769 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
13770 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
13771 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
13772 #endif
13774 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
13776 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
13777 opts->x_param_values,
13778 opts_set->x_param_values);
13779 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
13780 opts->x_param_values,
13781 opts_set->x_param_values);
13782 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
13783 opts->x_param_values,
13784 opts_set->x_param_values);
13785 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
13786 opts->x_param_values,
13787 opts_set->x_param_values);
13790 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
13791 opts->x_param_values,
13792 opts_set->x_param_values);
13793 /* values for loop prefetching */
13794 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
13795 opts->x_param_values,
13796 opts_set->x_param_values);
13797 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
13798 opts->x_param_values,
13799 opts_set->x_param_values);
13800 /* s390 has more than 2 cache levels and the sizes are much larger. Since
13801 we are always running virtualized, assume that we only get a small
13802 part of the caches above L1. */
13803 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
13804 opts->x_param_values,
13805 opts_set->x_param_values);
13806 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
13807 opts->x_param_values,
13808 opts_set->x_param_values);
13809 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
13810 opts->x_param_values,
13811 opts_set->x_param_values);
13813 /* Use the alternative scheduling-pressure algorithm by default. */
13814 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
13815 opts->x_param_values,
13816 opts_set->x_param_values);
13818 /* Call target specific restore function to do post-init work. At the moment,
13819 this just sets opts->x_s390_cost_pointer. */
13820 s390_function_specific_restore (opts, NULL);
13823 static void
13824 s390_option_override (void)
13826 unsigned int i;
13827 cl_deferred_option *opt;
13828 vec<cl_deferred_option> *v =
13829 (vec<cl_deferred_option> *) s390_deferred_options;
13831 if (v)
13832 FOR_EACH_VEC_ELT (*v, i, opt)
13834 switch (opt->opt_index)
13836 case OPT_mhotpatch_:
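/* The argument has the form "n,m"; n and m give the number of NOP
   halfwords to be emitted before and after the function label
   (stored in s390_hotpatch_hw_before_label / _after_label).  */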
13838 int val1;
13839 int val2;
13840 char s[256];
13841 char *t;
13843 strncpy (s, opt->arg, 256);
13844 s[255] = 0;
13845 t = strchr (s, ',');
13846 if (t != NULL)
13848 *t = 0;
13849 t++;
13850 val1 = integral_argument (s);
13851 val2 = integral_argument (t);
13853 else
13855 val1 = -1;
13856 val2 = -1;
13858 if (val1 == -1 || val2 == -1)
13860 /* argument is not a plain number */
13861 error ("arguments to %qs should be non-negative integers",
13862 "-mhotpatch=n,m");
13863 break;
13865 else if (val1 > s390_hotpatch_hw_max
13866 || val2 > s390_hotpatch_hw_max)
13868 error ("argument to %qs is too large (max. %d)",
13869 "-mhotpatch=n,m", s390_hotpatch_hw_max);
13870 break;
13872 s390_hotpatch_hw_before_label = val1;
13873 s390_hotpatch_hw_after_label = val2;
13874 break;
13876 default:
13877 gcc_unreachable ();
13881 /* Set up function hooks. */
13882 init_machine_status = s390_init_machine_status;
13884 s390_option_override_internal (true, &global_options, &global_options_set);
13886 /* Save the initial options in case the user does function specific
13887 options. */
13888 target_option_default_node = build_target_option_node (&global_options);
13889 target_option_current_node = target_option_default_node;
13891 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
13892 requires the arch flags to be evaluated already. Since prefetching
13893 is beneficial on s390, we enable it if available. */
13894 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
13895 flag_prefetch_loop_arrays = 1;
13897 if (TARGET_TPF)
13899 /* Don't emit DWARF3/4 unless specifically selected. The TPF
13900 debuggers do not yet support DWARF 3/4. */
13901 if (!global_options_set.x_dwarf_strict)
13902 dwarf_strict = 1;
13903 if (!global_options_set.x_dwarf_version)
13904 dwarf_version = 2;
13907 /* Register a target-specific optimization-and-lowering pass
13908 to run immediately before prologue and epilogue generation.
13910 Registering the pass must be done at start up. It's
13911 convenient to do it here. */
13912 opt_pass *new_pass = new pass_s390_early_mach (g);
13913 struct register_pass_info insert_pass_s390_early_mach =
13915 new_pass, /* pass */
13916 "pro_and_epilogue", /* reference_pass_name */
13917 1, /* ref_pass_instance_number */
13918 PASS_POS_INSERT_BEFORE /* pos_op */
13920 register_pass (&insert_pass_s390_early_mach);
13923 #if S390_USE_TARGET_ATTRIBUTE
13924 /* Inner function to process the attribute((target(...))); it takes an argument and
13925 sets the current options from it. If we have a list, recursively go
13926 over the list. */
13928 static bool
13929 s390_valid_target_attribute_inner_p (tree args,
13930 struct gcc_options *opts,
13931 struct gcc_options *new_opts_set,
13932 bool force_pragma)
13934 char *next_optstr;
13935 bool ret = true;
13937 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
13938 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
13939 static const struct
13941 const char *string;
13942 size_t len;
13943 int opt;
13944 int has_arg;
13945 int only_as_pragma;
13946 } attrs[] = {
13947 /* enum options */
13948 S390_ATTRIB ("arch=", OPT_march_, 1),
13949 S390_ATTRIB ("tune=", OPT_mtune_, 1),
13950 /* uinteger options */
13951 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
13952 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
13953 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
13954 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
13955 /* flag options */
13956 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
13957 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
13958 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
13959 S390_ATTRIB ("htm", OPT_mhtm, 0),
13960 S390_ATTRIB ("vx", OPT_mvx, 0),
13961 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
13962 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
13963 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
13964 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
13965 S390_PRAGMA ("zvector", OPT_mzvector, 0),
13966 /* boolean options */
13967 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
13969 #undef S390_ATTRIB
13970 #undef S390_PRAGMA
13972 /* If this is a list, recurse to get the options. */
13973 if (TREE_CODE (args) == TREE_LIST)
13975 bool ret = true;
13976 int num_pragma_values;
13977 int i;
13979 /* Note: attribs.c:decl_attributes prepends the values from
13980 current_target_pragma to the list of target attributes. To determine
13981 whether we're looking at a value of the attribute or the pragma we
13982 assume that the first [list_length (current_target_pragma)] values in
13983 the list are the values from the pragma. */
13984 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
13985 ? list_length (current_target_pragma) : 0;
13986 for (i = 0; args; args = TREE_CHAIN (args), i++)
13988 bool is_pragma;
13990 is_pragma = (force_pragma || i < num_pragma_values);
13991 if (TREE_VALUE (args)
13992 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
13993 opts, new_opts_set,
13994 is_pragma))
13996 ret = false;
13999 return ret;
14002 else if (TREE_CODE (args) != STRING_CST)
14004 error ("attribute %<target%> argument not a string");
14005 return false;
14008 /* Handle multiple arguments separated by commas. */
14009 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14011 while (next_optstr && *next_optstr != '\0')
14013 char *p = next_optstr;
14014 char *orig_p = p;
14015 char *comma = strchr (next_optstr, ',');
14016 size_t len, opt_len;
14017 int opt;
14018 bool opt_set_p;
14019 char ch;
14020 unsigned i;
14021 int mask = 0;
14022 enum cl_var_type var_type;
14023 bool found;
14025 if (comma)
14027 *comma = '\0';
14028 len = comma - next_optstr;
14029 next_optstr = comma + 1;
14031 else
14033 len = strlen (p);
14034 next_optstr = NULL;
14037 /* Recognize no-xxx. */
14038 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14040 opt_set_p = false;
14041 p += 3;
14042 len -= 3;
14044 else
14045 opt_set_p = true;
14047 /* Find the option. */
14048 ch = *p;
14049 found = false;
14050 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14052 opt_len = attrs[i].len;
14053 if (ch == attrs[i].string[0]
14054 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14055 && memcmp (p, attrs[i].string, opt_len) == 0)
14057 opt = attrs[i].opt;
14058 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14059 continue;
14060 mask = cl_options[opt].var_value;
14061 var_type = cl_options[opt].var_type;
14062 found = true;
14063 break;
14067 /* Process the option. */
14068 if (!found)
14070 error ("attribute(target(\"%s\")) is unknown", orig_p);
14071 return false;
14073 else if (attrs[i].only_as_pragma && !force_pragma)
14075 /* Value is not allowed for the target attribute. */
14076 error ("Value %qs is not supported by attribute %<target%>",
14077 attrs[i].string);
14078 return false;
14081 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14083 if (var_type == CLVC_BIT_CLEAR)
14084 opt_set_p = !opt_set_p;
14086 if (opt_set_p)
14087 opts->x_target_flags |= mask;
14088 else
14089 opts->x_target_flags &= ~mask;
14090 new_opts_set->x_target_flags |= mask;
14093 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14095 int value;
14097 if (cl_options[opt].cl_uinteger)
14099 /* Unsigned integer argument. Code based on the function
14100 decode_cmdline_option () in opts-common.c. */
14101 value = integral_argument (p + opt_len);
14103 else
14104 value = (opt_set_p) ? 1 : 0;
14106 if (value != -1)
14108 struct cl_decoded_option decoded;
14110 /* Value range check; only implemented for numeric and boolean
14111 options at the moment. */
14112 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14113 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14114 set_option (opts, new_opts_set, opt, value,
14115 p + opt_len, DK_UNSPECIFIED, input_location,
14116 global_dc);
14118 else
14120 error ("attribute(target(\"%s\")) is unknown", orig_p);
14121 ret = false;
14125 else if (cl_options[opt].var_type == CLVC_ENUM)
14127 bool arg_ok;
14128 int value;
14130 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14131 if (arg_ok)
14132 set_option (opts, new_opts_set, opt, value,
14133 p + opt_len, DK_UNSPECIFIED, input_location,
14134 global_dc);
14135 else
14137 error ("attribute(target(\"%s\")) is unknown", orig_p);
14138 ret = false;
14142 else
14143 gcc_unreachable ();
14145 return ret;
14148 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14150 tree
14151 s390_valid_target_attribute_tree (tree args,
14152 struct gcc_options *opts,
14153 const struct gcc_options *opts_set,
14154 bool force_pragma)
14156 tree t = NULL_TREE;
14157 struct gcc_options new_opts_set;
14159 memset (&new_opts_set, 0, sizeof (new_opts_set));
14161 /* Process each of the options on the chain. */
14162 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14163 force_pragma))
14164 return error_mark_node;
14166 /* If some option was set (even if it has not changed), rerun
14167 s390_option_override_internal, and then save the options away. */
14168 if (new_opts_set.x_target_flags
14169 || new_opts_set.x_s390_arch
14170 || new_opts_set.x_s390_tune
14171 || new_opts_set.x_s390_stack_guard
14172 || new_opts_set.x_s390_stack_size
14173 || new_opts_set.x_s390_branch_cost
14174 || new_opts_set.x_s390_warn_framesize
14175 || new_opts_set.x_s390_warn_dynamicstack_p)
14177 const unsigned char *src = (const unsigned char *)opts_set;
14178 unsigned char *dest = (unsigned char *)&new_opts_set;
14179 unsigned int i;
14181 /* Merge the original option flags into the new ones. */
14182 for (i = 0; i < sizeof(*opts_set); i++)
14183 dest[i] |= src[i];
14185 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
14186 s390_option_override_internal (false, opts, &new_opts_set);
14187 /* Save the current options unless we are validating options for
14188 #pragma. */
14189 t = build_target_option_node (opts);
14191 return t;
14194 /* Hook to validate attribute((target("string"))). */
14196 static bool
14197 s390_valid_target_attribute_p (tree fndecl,
14198 tree ARG_UNUSED (name),
14199 tree args,
14200 int ARG_UNUSED (flags))
14202 struct gcc_options func_options;
14203 tree new_target, new_optimize;
14204 bool ret = true;
14206 /* attribute((target("default"))) does nothing, beyond
14207 affecting multi-versioning. */
14208 if (TREE_VALUE (args)
14209 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
14210 && TREE_CHAIN (args) == NULL_TREE
14211 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
14212 return true;
14214 tree old_optimize = build_optimization_node (&global_options);
14216 /* Get the optimization options of the current function. */
14217 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
14219 if (!func_optimize)
14220 func_optimize = old_optimize;
14222 /* Init func_options. */
14223 memset (&func_options, 0, sizeof (func_options));
14224 init_options_struct (&func_options, NULL);
14225 lang_hooks.init_options_struct (&func_options);
14227 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
14229 /* Initialize func_options to the default before its target options can
14230 be set. */
14231 cl_target_option_restore (&func_options,
14232 TREE_TARGET_OPTION (target_option_default_node));
14234 new_target = s390_valid_target_attribute_tree (args, &func_options,
14235 &global_options_set,
14236 (args ==
14237 current_target_pragma));
14238 new_optimize = build_optimization_node (&func_options);
14239 if (new_target == error_mark_node)
14240 ret = false;
14241 else if (fndecl && new_target)
14243 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
14244 if (old_optimize != new_optimize)
14245 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
14247 return ret;
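/* Illustration (hypothetical declarations): for

     __attribute__ ((target ("default"))) void f (void);
     __attribute__ ((target ("arch=z13"))) void g (void);

   f takes the early "default" exit above, whereas g gets a fresh target
   option node stored in DECL_FUNCTION_SPECIFIC_TARGET.  */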
14250 /* Restore target globals from NEW_TREE and invalidate the s390_previous_fndecl
14251 cache. */
14253 void
14254 s390_activate_target_options (tree new_tree)
14256 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
14257 if (TREE_TARGET_GLOBALS (new_tree))
14258 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
14259 else if (new_tree == target_option_default_node)
14260 restore_target_globals (&default_target_globals);
14261 else
14262 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
14263 s390_previous_fndecl = NULL_TREE;
14266 /* Establish appropriate back-end context for processing the function
14267 FNDECL. The argument might be NULL to indicate processing at top
14268 level, outside of any function scope. */
14269 static void
14270 s390_set_current_function (tree fndecl)
14272 /* Only change the context if the function changes. This hook is called
14273 several times in the course of compiling a function, and we don't want to
14274 slow things down too much or call target_reinit when it isn't safe. */
14275 if (fndecl == s390_previous_fndecl)
14276 return;
14278 tree old_tree;
14279 if (s390_previous_fndecl == NULL_TREE)
14280 old_tree = target_option_current_node;
14281 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
14282 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
14283 else
14284 old_tree = target_option_default_node;
14286 if (fndecl == NULL_TREE)
14288 if (old_tree != target_option_current_node)
14289 s390_activate_target_options (target_option_current_node);
14290 return;
14293 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
14294 if (new_tree == NULL_TREE)
14295 new_tree = target_option_default_node;
14297 if (old_tree != new_tree)
14298 s390_activate_target_options (new_tree);
14299 s390_previous_fndecl = fndecl;
14301 #endif
14303 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
14305 static bool
14306 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
14307 unsigned int align ATTRIBUTE_UNUSED,
14308 enum by_pieces_operation op ATTRIBUTE_UNUSED,
14309 bool speed_p ATTRIBUTE_UNUSED)
14311 return (size == 1 || size == 2
14312 || size == 4 || (TARGET_ZARCH && size == 8));
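/* Sketch of the effect (illustrative, assuming z/Architecture): a
   fixed-size copy like

     __builtin_memcpy (dst, src, 8);

   may be expanded inline through the by-pieces framework, because size 8
   is accepted above under TARGET_ZARCH, whereas a size such as 6 falls
   back to the target's block-move expansion or a library call.  */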
14315 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
14317 static void
14318 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
14320 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
14321 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
14322 tree call_efpc = build_call_expr (efpc, 0);
14323 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
14325 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
14326 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
14327 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
14328 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
14329 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
14330 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
14332 /* Generates the equivalent of feholdexcept (&fenv_var)
14334 fenv_var = __builtin_s390_efpc ();
14335 __builtin_s390_sfpc (fenv_var & mask) */
14336 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
14337 tree new_fpc =
14338 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
14339 build_int_cst (unsigned_type_node,
14340 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
14341 FPC_EXCEPTION_MASK)));
14342 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
14343 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
14345 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
14347 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
14348 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
14349 build_int_cst (unsigned_type_node,
14350 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
14351 *clear = build_call_expr (sfpc, 1, new_fpc);
14353 /* Generates the equivalent of feupdateenv (fenv_var)
14355 old_fpc = __builtin_s390_efpc ();
14356 __builtin_s390_sfpc (fenv_var);
14357 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
14359 old_fpc = create_tmp_var_raw (unsigned_type_node);
14360 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
14361 old_fpc, call_efpc);
14363 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
14365 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
14366 build_int_cst (unsigned_type_node,
14367 FPC_FLAGS_MASK));
14368 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
14369 build_int_cst (unsigned_type_node,
14370 FPC_FLAGS_SHIFT));
14371 tree atomic_feraiseexcept
14372 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
14373 raise_old_except = build_call_expr (atomic_feraiseexcept,
14374 1, raise_old_except);
14376 *update = build2 (COMPOUND_EXPR, void_type_node,
14377 build2 (COMPOUND_EXPR, void_type_node,
14378 store_old_fpc, set_new_fpc),
14379 raise_old_except);
14381 #undef FPC_EXCEPTION_MASK
14382 #undef FPC_FLAGS_MASK
14383 #undef FPC_DXC_MASK
14384 #undef FPC_EXCEPTION_MASK_SHIFT
14385 #undef FPC_FLAGS_SHIFT
14386 #undef FPC_DXC_SHIFT
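/* Rough picture of how the three sequences are used (illustrative C; the
   actual lowering is done by the middle end): for a C11 atomic compound
   assignment such as

     _Atomic double d;
     d += 1.0;

   the generated retry loop looks roughly like

     <*hold>                          hold FP exceptions
     do { old = d; tmp = old + 1.0;
          ok = CAS (&d, old, tmp);
          if (!ok) <*clear>           discard exceptions of the failed try
     } while (!ok);
     <*update>                        re-raise the deferred exceptions

   so FP exceptions raised by discarded iterations are not observable.  */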
14389 /* Return the vector mode to be used for inner mode MODE when doing
14390 vectorization. */
14391 static machine_mode
14392 s390_preferred_simd_mode (machine_mode mode)
14394 if (TARGET_VX)
14395 switch (mode)
14397 case DFmode:
14398 return V2DFmode;
14399 case DImode:
14400 return V2DImode;
14401 case SImode:
14402 return V4SImode;
14403 case HImode:
14404 return V8HImode;
14405 case QImode:
14406 return V16QImode;
14407 default:;
14409 return word_mode;
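/* For instance (illustrative only), with vector support enabled a loop
   over 32-bit elements

     void add (int *restrict a, const int *restrict b, int n)
     {
       for (int i = 0; i < n; i++)
         a[i] += b[i];
     }

   is vectorized using V4SImode, i.e. four ints per 128-bit vector
   register, whereas without TARGET_VX word_mode is returned and no
   vector modes are used.  */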
14412 /* Our hardware does not require vectors to be strictly aligned. */
14413 static bool
14414 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
14415 const_tree type ATTRIBUTE_UNUSED,
14416 int misalignment ATTRIBUTE_UNUSED,
14417 bool is_packed ATTRIBUTE_UNUSED)
14419 if (TARGET_VX)
14420 return true;
14422 return default_builtin_support_vector_misalignment (mode, type, misalignment,
14423 is_packed);
14426 /* The vector ABI requires vector types to be aligned on an 8-byte
14427 boundary (our stack alignment).  However, we allow the user to
14428 override this, although doing so breaks the ABI.  */
14429 static HOST_WIDE_INT
14430 s390_vector_alignment (const_tree type)
14432 if (!TARGET_VX_ABI)
14433 return default_vector_alignment (type);
14435 if (TYPE_USER_ALIGN (type))
14436 return TYPE_ALIGN (type);
14438 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
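/* Example (assumed user type): under the vector ABI a

     typedef int v4si __attribute__ ((vector_size (16)));

   object is aligned to only 64 bits here, even though the 16-byte mode
   would naturally be aligned to 128 bits; an explicit
   __attribute__ ((aligned (16))) still wins via the TYPE_USER_ALIGN
   check above.  */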
14441 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14442 /* Implement TARGET_ASM_FILE_START. */
14443 static void
14444 s390_asm_file_start (void)
14446 s390_asm_output_machine_for_arch (asm_out_file);
14448 #endif
14450 /* Implement TARGET_ASM_FILE_END. */
14451 static void
14452 s390_asm_file_end (void)
14454 #ifdef HAVE_AS_GNU_ATTRIBUTE
14455 varpool_node *vnode;
14456 cgraph_node *cnode;
14458 FOR_EACH_VARIABLE (vnode)
14459 if (TREE_PUBLIC (vnode->decl))
14460 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
14462 FOR_EACH_FUNCTION (cnode)
14463 if (TREE_PUBLIC (cnode->decl))
14464 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
14467 if (s390_vector_abi != 0)
14468 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
14469 s390_vector_abi);
14470 #endif
14471 file_end_indicate_exec_stack ();
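/* When the vector ABI is in effect this emits a line such as

     .gnu_attribute 8, <s390_vector_abi>

   into the assembly output (value shown symbolically; the concrete number
   mirrors s390_vector_abi), which tools can use to detect vector-ABI
   mismatches between objects.  */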
14474 /* Return true if TYPE is a vector bool type. */
14475 static inline bool
14476 s390_vector_bool_type_p (const_tree type)
14478 return TYPE_VECTOR_OPAQUE (type);
14481 /* Return the diagnostic message string if the binary operation OP is
14482 not permitted on TYPE1 and TYPE2, NULL otherwise. */
14483 static const char*
14484 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
14486 bool bool1_p, bool2_p;
14487 bool plusminus_p;
14488 bool muldiv_p;
14489 bool compare_p;
14490 machine_mode mode1, mode2;
14492 if (!TARGET_ZVECTOR)
14493 return NULL;
14495 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
14496 return NULL;
14498 bool1_p = s390_vector_bool_type_p (type1);
14499 bool2_p = s390_vector_bool_type_p (type2);
14501 /* Mixing signed and unsigned types is forbidden for all
14502 operators. */
14503 if (!bool1_p && !bool2_p
14504 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
14505 return N_("types differ in signess");
14507 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
14508 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
14509 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
14510 || op == ROUND_DIV_EXPR);
14511 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
14512 || op == EQ_EXPR || op == NE_EXPR);
14514 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
14515 return N_("binary operator does not support two vector bool operands");
14517 if (bool1_p != bool2_p && (muldiv_p || compare_p))
14518 return N_("binary operator does not support vector bool operand");
14520 mode1 = TYPE_MODE (type1);
14521 mode2 = TYPE_MODE (type2);
14523 if (bool1_p != bool2_p && plusminus_p
14524 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
14525 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
14526 return N_("binary operator does not support mixing vector "
14527 "bool with floating point vector operands");
14529 return NULL;
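/* Some rejected combinations as examples (hypothetical zvector code):

     vector bool long long b;
     vector double d;
     vector signed long long s;

     d * b   -> "binary operator does not support vector bool operand"
     b + b   -> "binary operator does not support two vector bool operands"
     b + d   -> rejected by the last check (bool mixed with a floating
                point vector in a plus/minus)

   whereas b + s is accepted, since plus/minus with exactly one bool
   operand and no floating point vector passes all checks above.  */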
14532 /* Initialize GCC target structure. */
14534 #undef TARGET_ASM_ALIGNED_HI_OP
14535 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
14536 #undef TARGET_ASM_ALIGNED_DI_OP
14537 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
14538 #undef TARGET_ASM_INTEGER
14539 #define TARGET_ASM_INTEGER s390_assemble_integer
14541 #undef TARGET_ASM_OPEN_PAREN
14542 #define TARGET_ASM_OPEN_PAREN ""
14544 #undef TARGET_ASM_CLOSE_PAREN
14545 #define TARGET_ASM_CLOSE_PAREN ""
14547 #undef TARGET_OPTION_OVERRIDE
14548 #define TARGET_OPTION_OVERRIDE s390_option_override
14550 #undef TARGET_ENCODE_SECTION_INFO
14551 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
14553 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14554 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
14556 #ifdef HAVE_AS_TLS
14557 #undef TARGET_HAVE_TLS
14558 #define TARGET_HAVE_TLS true
14559 #endif
14560 #undef TARGET_CANNOT_FORCE_CONST_MEM
14561 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
14563 #undef TARGET_DELEGITIMIZE_ADDRESS
14564 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
14566 #undef TARGET_LEGITIMIZE_ADDRESS
14567 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
14569 #undef TARGET_RETURN_IN_MEMORY
14570 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
14572 #undef TARGET_INIT_BUILTINS
14573 #define TARGET_INIT_BUILTINS s390_init_builtins
14574 #undef TARGET_EXPAND_BUILTIN
14575 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
14576 #undef TARGET_BUILTIN_DECL
14577 #define TARGET_BUILTIN_DECL s390_builtin_decl
14579 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
14580 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
14582 #undef TARGET_ASM_OUTPUT_MI_THUNK
14583 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
14584 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
14585 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
14587 #undef TARGET_SCHED_ADJUST_PRIORITY
14588 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
14589 #undef TARGET_SCHED_ISSUE_RATE
14590 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
14591 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
14592 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
14594 #undef TARGET_SCHED_VARIABLE_ISSUE
14595 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
14596 #undef TARGET_SCHED_REORDER
14597 #define TARGET_SCHED_REORDER s390_sched_reorder
14598 #undef TARGET_SCHED_INIT
14599 #define TARGET_SCHED_INIT s390_sched_init
14601 #undef TARGET_CANNOT_COPY_INSN_P
14602 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
14603 #undef TARGET_RTX_COSTS
14604 #define TARGET_RTX_COSTS s390_rtx_costs
14605 #undef TARGET_ADDRESS_COST
14606 #define TARGET_ADDRESS_COST s390_address_cost
14607 #undef TARGET_REGISTER_MOVE_COST
14608 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
14609 #undef TARGET_MEMORY_MOVE_COST
14610 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
14612 #undef TARGET_MACHINE_DEPENDENT_REORG
14613 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
14615 #undef TARGET_VALID_POINTER_MODE
14616 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
14618 #undef TARGET_BUILD_BUILTIN_VA_LIST
14619 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
14620 #undef TARGET_EXPAND_BUILTIN_VA_START
14621 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
14622 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
14623 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
14625 #undef TARGET_PROMOTE_FUNCTION_MODE
14626 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
14627 #undef TARGET_PASS_BY_REFERENCE
14628 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
14630 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
14631 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
14632 #undef TARGET_FUNCTION_ARG
14633 #define TARGET_FUNCTION_ARG s390_function_arg
14634 #undef TARGET_FUNCTION_ARG_ADVANCE
14635 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
14636 #undef TARGET_FUNCTION_VALUE
14637 #define TARGET_FUNCTION_VALUE s390_function_value
14638 #undef TARGET_LIBCALL_VALUE
14639 #define TARGET_LIBCALL_VALUE s390_libcall_value
14640 #undef TARGET_STRICT_ARGUMENT_NAMING
14641 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
14643 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
14644 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
14646 #undef TARGET_FIXED_CONDITION_CODE_REGS
14647 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
14649 #undef TARGET_CC_MODES_COMPATIBLE
14650 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
14652 #undef TARGET_INVALID_WITHIN_DOLOOP
14653 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
14655 #ifdef HAVE_AS_TLS
14656 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
14657 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
14658 #endif
14660 #undef TARGET_DWARF_FRAME_REG_MODE
14661 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
14663 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
14664 #undef TARGET_MANGLE_TYPE
14665 #define TARGET_MANGLE_TYPE s390_mangle_type
14666 #endif
14668 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14669 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
14671 #undef TARGET_VECTOR_MODE_SUPPORTED_P
14672 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
14674 #undef TARGET_PREFERRED_RELOAD_CLASS
14675 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
14677 #undef TARGET_SECONDARY_RELOAD
14678 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
14680 #undef TARGET_LIBGCC_CMP_RETURN_MODE
14681 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
14683 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
14684 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
14686 #undef TARGET_LEGITIMATE_ADDRESS_P
14687 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
14689 #undef TARGET_LEGITIMATE_CONSTANT_P
14690 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
14692 #undef TARGET_LRA_P
14693 #define TARGET_LRA_P s390_lra_p
14695 #undef TARGET_CAN_ELIMINATE
14696 #define TARGET_CAN_ELIMINATE s390_can_eliminate
14698 #undef TARGET_CONDITIONAL_REGISTER_USAGE
14699 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
14701 #undef TARGET_LOOP_UNROLL_ADJUST
14702 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
14704 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
14705 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
14706 #undef TARGET_TRAMPOLINE_INIT
14707 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
14709 #undef TARGET_UNWIND_WORD_MODE
14710 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
14712 #undef TARGET_CANONICALIZE_COMPARISON
14713 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
14715 #undef TARGET_HARD_REGNO_SCRATCH_OK
14716 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
14718 #undef TARGET_ATTRIBUTE_TABLE
14719 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
14721 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
14722 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
14724 #undef TARGET_SET_UP_BY_PROLOGUE
14725 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
14727 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
14728 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
14729 s390_use_by_pieces_infrastructure_p
14731 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
14732 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
14734 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
14735 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
14737 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
14738 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
14740 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
14741 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
14743 #undef TARGET_VECTOR_ALIGNMENT
14744 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
14746 #undef TARGET_INVALID_BINARY_OP
14747 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
14749 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14750 #undef TARGET_ASM_FILE_START
14751 #define TARGET_ASM_FILE_START s390_asm_file_start
14752 #endif
14754 #undef TARGET_ASM_FILE_END
14755 #define TARGET_ASM_FILE_END s390_asm_file_end
14757 #if S390_USE_TARGET_ATTRIBUTE
14758 #undef TARGET_SET_CURRENT_FUNCTION
14759 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
14761 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
14762 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
14763 #endif
14765 #undef TARGET_OPTION_RESTORE
14766 #define TARGET_OPTION_RESTORE s390_function_specific_restore
14768 struct gcc_target targetm = TARGET_INITIALIZER;
14770 #include "gt-s390.h"