S/390: PR79904: Disallow reg + sym_ref literal pool addresses.
[official-gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2017 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "target-globals.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "cfgloop.h"
34 #include "df.h"
35 #include "memmodel.h"
36 #include "tm_p.h"
37 #include "stringpool.h"
38 #include "expmed.h"
39 #include "optabs.h"
40 #include "regs.h"
41 #include "emit-rtl.h"
42 #include "recog.h"
43 #include "cgraph.h"
44 #include "diagnostic-core.h"
45 #include "diagnostic.h"
46 #include "alias.h"
47 #include "fold-const.h"
48 #include "print-tree.h"
49 #include "stor-layout.h"
50 #include "varasm.h"
51 #include "calls.h"
52 #include "conditions.h"
53 #include "output.h"
54 #include "insn-attr.h"
55 #include "flags.h"
56 #include "except.h"
57 #include "dojump.h"
58 #include "explow.h"
59 #include "stmt.h"
60 #include "expr.h"
61 #include "reload.h"
62 #include "cfgrtl.h"
63 #include "cfganal.h"
64 #include "lcm.h"
65 #include "cfgbuild.h"
66 #include "cfgcleanup.h"
67 #include "debug.h"
68 #include "langhooks.h"
69 #include "internal-fn.h"
70 #include "gimple-fold.h"
71 #include "tree-eh.h"
72 #include "gimplify.h"
73 #include "params.h"
74 #include "opts.h"
75 #include "tree-pass.h"
76 #include "context.h"
77 #include "builtins.h"
78 #include "rtl-iter.h"
79 #include "intl.h"
80 #include "tm-constrs.h"
82 /* This file should be included last. */
83 #include "target-def.h"
85 /* Remember the last target of s390_set_current_function. */
86 static GTY(()) tree s390_previous_fndecl;
88 /* Define the specific costs for a given cpu. */
90 struct processor_costs
92 /* multiplication */
93 const int m; /* cost of an M instruction. */
94 const int mghi; /* cost of an MGHI instruction. */
95 const int mh; /* cost of an MH instruction. */
96 const int mhi; /* cost of an MHI instruction. */
97 const int ml; /* cost of an ML instruction. */
98 const int mr; /* cost of an MR instruction. */
99 const int ms; /* cost of an MS instruction. */
100 const int msg; /* cost of an MSG instruction. */
101 const int msgf; /* cost of an MSGF instruction. */
102 const int msgfr; /* cost of an MSGFR instruction. */
103 const int msgr; /* cost of an MSGR instruction. */
104 const int msr; /* cost of an MSR instruction. */
105 const int mult_df; /* cost of multiplication in DFmode. */
106 const int mxbr;
107 /* square root */
108 const int sqxbr; /* cost of square root in TFmode. */
109 const int sqdbr; /* cost of square root in DFmode. */
110 const int sqebr; /* cost of square root in SFmode. */
111 /* multiply and add */
112 const int madbr; /* cost of multiply and add in DFmode. */
113 const int maebr; /* cost of multiply and add in SFmode. */
114 /* division */
115 const int dxbr;
116 const int ddbr;
117 const int debr;
118 const int dlgr;
119 const int dlr;
120 const int dr;
121 const int dsgfr;
122 const int dsgr;
125 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
127 static const
128 struct processor_costs z900_cost =
130 COSTS_N_INSNS (5), /* M */
131 COSTS_N_INSNS (10), /* MGHI */
132 COSTS_N_INSNS (5), /* MH */
133 COSTS_N_INSNS (4), /* MHI */
134 COSTS_N_INSNS (5), /* ML */
135 COSTS_N_INSNS (5), /* MR */
136 COSTS_N_INSNS (4), /* MS */
137 COSTS_N_INSNS (15), /* MSG */
138 COSTS_N_INSNS (7), /* MSGF */
139 COSTS_N_INSNS (7), /* MSGFR */
140 COSTS_N_INSNS (10), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (7), /* multiplication in DFmode */
143 COSTS_N_INSNS (13), /* MXBR */
144 COSTS_N_INSNS (136), /* SQXBR */
145 COSTS_N_INSNS (44), /* SQDBR */
146 COSTS_N_INSNS (35), /* SQEBR */
147 COSTS_N_INSNS (18), /* MADBR */
148 COSTS_N_INSNS (13), /* MAEBR */
149 COSTS_N_INSNS (134), /* DXBR */
150 COSTS_N_INSNS (30), /* DDBR */
151 COSTS_N_INSNS (27), /* DEBR */
152 COSTS_N_INSNS (220), /* DLGR */
153 COSTS_N_INSNS (34), /* DLR */
154 COSTS_N_INSNS (34), /* DR */
155 COSTS_N_INSNS (32), /* DSGFR */
156 COSTS_N_INSNS (32), /* DSGR */
159 static const
160 struct processor_costs z990_cost =
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (176), /* DLGR */
185 COSTS_N_INSNS (31), /* DLR */
186 COSTS_N_INSNS (31), /* DR */
187 COSTS_N_INSNS (31), /* DSGFR */
188 COSTS_N_INSNS (31), /* DSGR */
191 static const
192 struct processor_costs z9_109_cost =
194 COSTS_N_INSNS (4), /* M */
195 COSTS_N_INSNS (2), /* MGHI */
196 COSTS_N_INSNS (2), /* MH */
197 COSTS_N_INSNS (2), /* MHI */
198 COSTS_N_INSNS (4), /* ML */
199 COSTS_N_INSNS (4), /* MR */
200 COSTS_N_INSNS (5), /* MS */
201 COSTS_N_INSNS (6), /* MSG */
202 COSTS_N_INSNS (4), /* MSGF */
203 COSTS_N_INSNS (4), /* MSGFR */
204 COSTS_N_INSNS (4), /* MSGR */
205 COSTS_N_INSNS (4), /* MSR */
206 COSTS_N_INSNS (1), /* multiplication in DFmode */
207 COSTS_N_INSNS (28), /* MXBR */
208 COSTS_N_INSNS (130), /* SQXBR */
209 COSTS_N_INSNS (66), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (60), /* DXBR */
214 COSTS_N_INSNS (40), /* DDBR */
215 COSTS_N_INSNS (26), /* DEBR */
216 COSTS_N_INSNS (30), /* DLGR */
217 COSTS_N_INSNS (23), /* DLR */
218 COSTS_N_INSNS (23), /* DR */
219 COSTS_N_INSNS (24), /* DSGFR */
220 COSTS_N_INSNS (24), /* DSGR */
223 static const
224 struct processor_costs z10_cost =
226 COSTS_N_INSNS (10), /* M */
227 COSTS_N_INSNS (10), /* MGHI */
228 COSTS_N_INSNS (10), /* MH */
229 COSTS_N_INSNS (10), /* MHI */
230 COSTS_N_INSNS (10), /* ML */
231 COSTS_N_INSNS (10), /* MR */
232 COSTS_N_INSNS (10), /* MS */
233 COSTS_N_INSNS (10), /* MSG */
234 COSTS_N_INSNS (10), /* MSGF */
235 COSTS_N_INSNS (10), /* MSGFR */
236 COSTS_N_INSNS (10), /* MSGR */
237 COSTS_N_INSNS (10), /* MSR */
238 COSTS_N_INSNS (1) , /* multiplication in DFmode */
239 COSTS_N_INSNS (50), /* MXBR */
240 COSTS_N_INSNS (120), /* SQXBR */
241 COSTS_N_INSNS (52), /* SQDBR */
242 COSTS_N_INSNS (38), /* SQEBR */
243 COSTS_N_INSNS (1), /* MADBR */
244 COSTS_N_INSNS (1), /* MAEBR */
245 COSTS_N_INSNS (111), /* DXBR */
246 COSTS_N_INSNS (39), /* DDBR */
247 COSTS_N_INSNS (32), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR */
249 COSTS_N_INSNS (71), /* DLR */
250 COSTS_N_INSNS (71), /* DR */
251 COSTS_N_INSNS (71), /* DSGFR */
252 COSTS_N_INSNS (71), /* DSGR */
255 static const
256 struct processor_costs z196_cost =
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
270 COSTS_N_INSNS (1) , /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (101), /* DXBR B+101 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
287 static const
288 struct processor_costs zEC12_cost =
290 COSTS_N_INSNS (7), /* M */
291 COSTS_N_INSNS (5), /* MGHI */
292 COSTS_N_INSNS (5), /* MH */
293 COSTS_N_INSNS (5), /* MHI */
294 COSTS_N_INSNS (7), /* ML */
295 COSTS_N_INSNS (7), /* MR */
296 COSTS_N_INSNS (6), /* MS */
297 COSTS_N_INSNS (8), /* MSG */
298 COSTS_N_INSNS (6), /* MSGF */
299 COSTS_N_INSNS (6), /* MSGFR */
300 COSTS_N_INSNS (8), /* MSGR */
301 COSTS_N_INSNS (6), /* MSR */
302 COSTS_N_INSNS (1) , /* multiplication in DFmode */
303 COSTS_N_INSNS (40), /* MXBR B+40 */
304 COSTS_N_INSNS (100), /* SQXBR B+100 */
305 COSTS_N_INSNS (42), /* SQDBR B+42 */
306 COSTS_N_INSNS (28), /* SQEBR B+28 */
307 COSTS_N_INSNS (1), /* MADBR B */
308 COSTS_N_INSNS (1), /* MAEBR B */
309 COSTS_N_INSNS (131), /* DXBR B+131 */
310 COSTS_N_INSNS (29), /* DDBR */
311 COSTS_N_INSNS (22), /* DEBR */
312 COSTS_N_INSNS (160), /* DLGR cracked */
313 COSTS_N_INSNS (160), /* DLR cracked */
314 COSTS_N_INSNS (160), /* DR expanded */
315 COSTS_N_INSNS (160), /* DSGFR cracked */
316 COSTS_N_INSNS (160), /* DSGR cracked */
319 static struct
321 const char *const name;
322 const enum processor_type processor;
323 const struct processor_costs *cost;
325 const processor_table[] =
327 { "g5", PROCESSOR_9672_G5, &z900_cost },
328 { "g6", PROCESSOR_9672_G6, &z900_cost },
329 { "z900", PROCESSOR_2064_Z900, &z900_cost },
330 { "z990", PROCESSOR_2084_Z990, &z990_cost },
331 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
332 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
333 { "z10", PROCESSOR_2097_Z10, &z10_cost },
334 { "z196", PROCESSOR_2817_Z196, &z196_cost },
335 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
336 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
337 { "native", PROCESSOR_NATIVE, NULL }
340 extern int reload_completed;
342 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
343 static rtx_insn *last_scheduled_insn;
344 #define MAX_SCHED_UNITS 3
345 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
347 /* The maximum score added for an instruction whose unit hasn't been
348 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
349 give instruction mix scheduling more priority over instruction
350 grouping. */
351 #define MAX_SCHED_MIX_SCORE 8
353 /* The maximum distance up to which individual scores will be
354 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
355 Increase this with the OOO window size of the machine. */
356 #define MAX_SCHED_MIX_DISTANCE 100
358 /* Structure used to hold the components of a S/390 memory
359 address. A legitimate address on S/390 is of the general
360 form
361 base + index + displacement
362 where any of the components is optional.
364 base and index are registers of the class ADDR_REGS,
365 displacement is an unsigned 12-bit immediate constant. */
367 struct s390_address
369 rtx base;
370 rtx indx;
371 rtx disp;
372 bool pointer;
373 bool literal_pool;
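/* Illustrative sketch (not part of the original source): an address of the
   form described above could appear in RTL as

     (plus (plus (reg %r2) (reg %r3)) (const_int 40))

   which decomposes into base = %r2, indx = %r3, disp = 40 and corresponds
   to the assembler operand 40(%r3,%r2).  */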
376 /* The following structure is embedded in the machine
377 specific part of struct function. */
379 struct GTY (()) s390_frame_layout
381 /* Offset within stack frame. */
382 HOST_WIDE_INT gprs_offset;
383 HOST_WIDE_INT f0_offset;
384 HOST_WIDE_INT f4_offset;
385 HOST_WIDE_INT f8_offset;
386 HOST_WIDE_INT backchain_offset;
388 /* Numbers of the first and last GPRs for which slots in the
389 register save area are reserved. */
390 int first_save_gpr_slot;
391 int last_save_gpr_slot;
393 /* Location (FP register number) where GPRs (r0-r15) should
394 be saved to.
395 0 - does not need to be saved at all
396 -1 - stack slot */
397 #define SAVE_SLOT_NONE 0
398 #define SAVE_SLOT_STACK -1
399 signed char gpr_save_slots[16];
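/* Illustrative examples derived from the comment above (not part of the
   original source):
     gpr_save_slots[14] == SAVE_SLOT_STACK  -> r14 goes to its stack slot
     gpr_save_slots[14] == SAVE_SLOT_NONE   -> r14 need not be saved
     gpr_save_slots[14] == <FPR number>     -> r14 is saved into that FPR.  */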
401 /* Number of first and last gpr to be saved, restored. */
402 int first_save_gpr;
403 int first_restore_gpr;
404 int last_save_gpr;
405 int last_restore_gpr;
407 /* Bits standing for floating point registers. Set, if the
408 respective register has to be saved. Starting with reg 16 (f0)
409 at the rightmost bit.
410 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
411 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
412 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
413 unsigned int fpr_bitmap;
415 /* Number of floating point registers f8-f15 which must be saved. */
416 int high_fprs;
418 /* Set if return address needs to be saved.
419 This flag is set by s390_return_addr_rtx if it could not use
420 the initial value of r14 and therefore depends on r14 saved
421 to the stack. */
422 bool save_return_addr_p;
424 /* Size of stack frame. */
425 HOST_WIDE_INT frame_size;
428 /* Define the structure for the machine field in struct function. */
430 struct GTY(()) machine_function
432 struct s390_frame_layout frame_layout;
434 /* Literal pool base register. */
435 rtx base_reg;
437 /* True if we may need to perform branch splitting. */
438 bool split_branches_pending_p;
440 bool has_landing_pad_p;
442 /* True if the current function may contain a tbegin clobbering
443 FPRs. */
444 bool tbegin_p;
446 /* For -fsplit-stack support: A stack local which holds a pointer to
447 the stack arguments for a function with a variable number of
448 arguments. This is set at the start of the function and is used
449 to initialize the overflow_arg_area field of the va_list
450 structure. */
451 rtx split_stack_varargs_pointer;
454 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
456 #define cfun_frame_layout (cfun->machine->frame_layout)
457 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
458 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
459 ? cfun_frame_layout.fpr_bitmap & 0x0f \
460 : cfun_frame_layout.fpr_bitmap & 0x03))
461 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
462 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
463 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
464 (1 << (REGNO - FPR0_REGNUM)))
465 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
466 (1 << (REGNO - FPR0_REGNUM))))
467 #define cfun_gpr_save_slot(REGNO) \
468 cfun->machine->frame_layout.gpr_save_slots[REGNO]
470 /* Number of GPRs and FPRs used for argument passing. */
471 #define GP_ARG_NUM_REG 5
472 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
473 #define VEC_ARG_NUM_REG 8
475 /* A couple of shortcuts. */
476 #define CONST_OK_FOR_J(x) \
477 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
478 #define CONST_OK_FOR_K(x) \
479 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
480 #define CONST_OK_FOR_Os(x) \
481 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
482 #define CONST_OK_FOR_Op(x) \
483 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
484 #define CONST_OK_FOR_On(x) \
485 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
487 #define REGNO_PAIR_OK(REGNO, MODE) \
488 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
490 /* This is the read-ahead of the dynamic branch prediction unit in
491 bytes on a z10 (or higher) CPU. */
492 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
495 /* Indicate which ABI has been used for passing vector args.
496 0 - no vector type arguments have been passed where the ABI is relevant
497 1 - the old ABI has been used
498 2 - a vector type argument has been passed either in a vector register
499 or on the stack by value */
500 static int s390_vector_abi = 0;
502 /* Set the vector ABI marker if TYPE is subject to the vector ABI
503 switch. The vector ABI affects only vector data types. There are
504 two aspects of the vector ABI relevant here:
506 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
507 ABI and natural alignment with the old.
509 2. vectors <= 16 bytes are passed in VRs or by value on the stack
510 with the new ABI but by reference on the stack with the old.
512 If ARG_P is true TYPE is used for a function argument or return
513 value. The ABI marker then is set for all vector data types. If
514 ARG_P is false only type 1 vectors are being checked. */
516 static void
517 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
519 static hash_set<const_tree> visited_types_hash;
521 if (s390_vector_abi)
522 return;
524 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
525 return;
527 if (visited_types_hash.contains (type))
528 return;
530 visited_types_hash.add (type);
532 if (VECTOR_TYPE_P (type))
534 int type_size = int_size_in_bytes (type);
536 /* Outside of arguments only the alignment changes, and this
537 only happens for vector types >= 16 bytes. */
538 if (!arg_p && type_size < 16)
539 return;
541 /* In arguments, vector types > 16 bytes are passed as before (GCC
542 never enforced the bigger alignment for arguments which was
543 required by the old vector ABI). However, it might still be
544 ABI relevant due to the changed alignment if it is a struct
545 member. */
546 if (arg_p && type_size > 16 && !in_struct_p)
547 return;
549 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
551 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
553 /* ARRAY_TYPE: Since neither ABI uses more than natural alignment
554 here, there will never be ABI-dependent padding
555 in an array type. That's why we do not set in_struct_p to
556 true here. */
557 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
559 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
561 tree arg_chain;
563 /* Check the return type. */
564 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
566 for (arg_chain = TYPE_ARG_TYPES (type);
567 arg_chain;
568 arg_chain = TREE_CHAIN (arg_chain))
569 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
571 else if (RECORD_OR_UNION_TYPE_P (type))
573 tree field;
575 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
577 if (TREE_CODE (field) != FIELD_DECL)
578 continue;
580 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
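/* Illustrative example (not part of the original source) of code that makes
   the check above set the ABI marker, assuming a vector-capable target:

     typedef int v4si __attribute__ ((vector_size (16)));
     void foo (v4si x);   -- a 16-byte vector argument passed by value

   Whether s390_vector_abi then ends up as 1 or 2 depends only on whether
   TARGET_VX_ABI is in effect.  */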
586 /* System z builtins. */
588 #include "s390-builtins.h"
590 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
592 #undef B_DEF
593 #undef OB_DEF
594 #undef OB_DEF_VAR
595 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
596 #define OB_DEF(...)
597 #define OB_DEF_VAR(...)
598 #include "s390-builtins.def"
602 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
604 #undef B_DEF
605 #undef OB_DEF
606 #undef OB_DEF_VAR
607 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
608 #define OB_DEF(...)
609 #define OB_DEF_VAR(...)
610 #include "s390-builtins.def"
614 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
616 #undef B_DEF
617 #undef OB_DEF
618 #undef OB_DEF_VAR
619 #define B_DEF(...)
620 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
621 #define OB_DEF_VAR(...)
622 #include "s390-builtins.def"
626 const unsigned int
627 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
629 #undef B_DEF
630 #undef OB_DEF
631 #undef OB_DEF_VAR
632 #define B_DEF(...)
633 #define OB_DEF(...)
634 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
635 #include "s390-builtins.def"
639 tree s390_builtin_types[BT_MAX];
640 tree s390_builtin_fn_types[BT_FN_MAX];
641 tree s390_builtin_decls[S390_BUILTIN_MAX +
642 S390_OVERLOADED_BUILTIN_MAX +
643 S390_OVERLOADED_BUILTIN_VAR_MAX];
645 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
646 #undef B_DEF
647 #undef OB_DEF
648 #undef OB_DEF_VAR
649 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
650 #define OB_DEF(...)
651 #define OB_DEF_VAR(...)
653 #include "s390-builtins.def"
654 CODE_FOR_nothing
657 static void
658 s390_init_builtins (void)
660 /* These definitions are being used in s390-builtins.def. */
661 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
662 NULL, NULL);
663 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
664 tree c_uint64_type_node;
666 /* The uint64_type_node from tree.c is not compatible with the C99
667 uint64_t data type. What we want is c_uint64_type_node from
668 c-common.c. But since backend code is not supposed to interface
669 with the frontend we recreate it here. */
670 if (TARGET_64BIT)
671 c_uint64_type_node = long_unsigned_type_node;
672 else
673 c_uint64_type_node = long_long_unsigned_type_node;
675 #undef DEF_TYPE
676 #define DEF_TYPE(INDEX, NODE, CONST_P) \
677 if (s390_builtin_types[INDEX] == NULL) \
678 s390_builtin_types[INDEX] = (!CONST_P) ? \
679 (NODE) : build_type_variant ((NODE), 1, 0);
681 #undef DEF_POINTER_TYPE
682 #define DEF_POINTER_TYPE(INDEX, INDEX_BASE) \
683 if (s390_builtin_types[INDEX] == NULL) \
684 s390_builtin_types[INDEX] = \
685 build_pointer_type (s390_builtin_types[INDEX_BASE]);
687 #undef DEF_DISTINCT_TYPE
688 #define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE) \
689 if (s390_builtin_types[INDEX] == NULL) \
690 s390_builtin_types[INDEX] = \
691 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
693 #undef DEF_VECTOR_TYPE
694 #define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
695 if (s390_builtin_types[INDEX] == NULL) \
696 s390_builtin_types[INDEX] = \
697 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
699 #undef DEF_OPAQUE_VECTOR_TYPE
700 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
701 if (s390_builtin_types[INDEX] == NULL) \
702 s390_builtin_types[INDEX] = \
703 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
705 #undef DEF_FN_TYPE
706 #define DEF_FN_TYPE(INDEX, args...) \
707 if (s390_builtin_fn_types[INDEX] == NULL) \
708 s390_builtin_fn_types[INDEX] = \
709 build_function_type_list (args, NULL_TREE);
710 #undef DEF_OV_TYPE
711 #define DEF_OV_TYPE(...)
712 #include "s390-builtin-types.def"
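/* A hypothetical sketch of how a .def file drives the macros above (these
   entries are made up for illustration and not taken from
   s390-builtin-types.def itself):

     DEF_TYPE (BT_INT, integer_type_node, 0)
     DEF_POINTER_TYPE (BT_INT_PTR, BT_INT)

   The first entry would register the plain int node, the second a pointer
   type built on top of it, each guarded against double initialization.  */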
714 #undef B_DEF
715 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
716 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
717 s390_builtin_decls[S390_BUILTIN_##NAME] = \
718 add_builtin_function ("__builtin_" #NAME, \
719 s390_builtin_fn_types[FNTYPE], \
720 S390_BUILTIN_##NAME, \
721 BUILT_IN_MD, \
722 NULL, \
723 ATTRS);
724 #undef OB_DEF
725 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
726 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
727 == NULL) \
728 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
729 add_builtin_function ("__builtin_" #NAME, \
730 s390_builtin_fn_types[FNTYPE], \
731 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
732 BUILT_IN_MD, \
733 NULL, \
735 #undef OB_DEF_VAR
736 #define OB_DEF_VAR(...)
737 #include "s390-builtins.def"
741 /* Return true if ARG is appropriate as argument number ARGNUM of
742 builtin DECL. The operand flags from s390-builtins.def have to
743 be passed as OP_FLAGS. */
744 bool
745 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
747 if (O_UIMM_P (op_flags))
749 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
750 int bitwidth = bitwidths[op_flags - O_U1];
752 if (!tree_fits_uhwi_p (arg)
753 || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
755 error("constant argument %d for builtin %qF is out of range (0.."
756 HOST_WIDE_INT_PRINT_UNSIGNED ")",
757 argnum, decl,
758 (HOST_WIDE_INT_1U << bitwidth) - 1);
759 return false;
763 if (O_SIMM_P (op_flags))
765 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
766 int bitwidth = bitwidths[op_flags - O_S2];
768 if (!tree_fits_shwi_p (arg)
769 || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
770 || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
772 error("constant argument %d for builtin %qF is out of range ("
773 HOST_WIDE_INT_PRINT_DEC ".."
774 HOST_WIDE_INT_PRINT_DEC ")",
775 argnum, decl,
776 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
777 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
778 return false;
781 return true;
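/* Worked example based on the tables above (not part of the original
   source): for an O_U4 operand the bitwidth is 4, so the accepted range is
   0 .. (1 << 4) - 1 = 15; an O_S8 operand accepts
   -(1 << 7) = -128 .. (1 << 7) - 1 = 127.  */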
784 /* Expand an expression EXP that calls a built-in function,
785 with result going to TARGET if that's convenient
786 (and in mode MODE if that's convenient).
787 SUBTARGET may be used as the target for computing one of EXP's operands.
788 IGNORE is nonzero if the value is to be ignored. */
790 static rtx
791 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
792 machine_mode mode ATTRIBUTE_UNUSED,
793 int ignore ATTRIBUTE_UNUSED)
795 #define MAX_ARGS 6
797 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
798 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
799 enum insn_code icode;
800 rtx op[MAX_ARGS], pat;
801 int arity;
802 bool nonvoid;
803 tree arg;
804 call_expr_arg_iterator iter;
805 unsigned int all_op_flags = opflags_for_builtin (fcode);
806 machine_mode last_vec_mode = VOIDmode;
808 if (TARGET_DEBUG_ARG)
810 fprintf (stderr,
811 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
812 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
813 bflags_for_builtin (fcode));
816 if (S390_USE_TARGET_ATTRIBUTE)
818 unsigned int bflags;
820 bflags = bflags_for_builtin (fcode);
821 if ((bflags & B_HTM) && !TARGET_HTM)
823 error ("builtin %qF is not supported without -mhtm "
824 "(default with -march=zEC12 and higher).", fndecl);
825 return const0_rtx;
827 if ((bflags & B_VX) && !TARGET_VX)
829 error ("builtin %qF is not supported without -mvx "
830 "(default with -march=z13 and higher).", fndecl);
831 return const0_rtx;
834 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
835 && fcode < S390_ALL_BUILTIN_MAX)
837 gcc_unreachable ();
839 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
841 icode = code_for_builtin[fcode];
842 /* Set a flag in the machine specific cfun part in order to support
843 saving/restoring of FPRs. */
844 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
845 cfun->machine->tbegin_p = true;
847 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
849 error ("unresolved overloaded builtin");
850 return const0_rtx;
852 else
853 internal_error ("bad builtin fcode");
855 if (icode == 0)
856 internal_error ("bad builtin icode");
858 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
860 if (nonvoid)
862 machine_mode tmode = insn_data[icode].operand[0].mode;
863 if (!target
864 || GET_MODE (target) != tmode
865 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
866 target = gen_reg_rtx (tmode);
868 /* There are builtins (e.g. vec_promote) with no vector
869 arguments but an element selector. So we have to also look
870 at the vector return type when emitting the modulo
871 operation. */
872 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
873 last_vec_mode = insn_data[icode].operand[0].mode;
876 arity = 0;
877 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
879 rtx tmp_rtx;
880 const struct insn_operand_data *insn_op;
881 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
883 all_op_flags = all_op_flags >> O_SHIFT;
885 if (arg == error_mark_node)
886 return NULL_RTX;
887 if (arity >= MAX_ARGS)
888 return NULL_RTX;
890 if (O_IMM_P (op_flags)
891 && TREE_CODE (arg) != INTEGER_CST)
893 error ("constant value required for builtin %qF argument %d",
894 fndecl, arity + 1);
895 return const0_rtx;
898 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
899 return const0_rtx;
901 insn_op = &insn_data[icode].operand[arity + nonvoid];
902 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
904 /* expand_expr truncates constants to the target mode only if it
905 is "convenient". However, our checks below rely on this
906 being done. */
907 if (CONST_INT_P (op[arity])
908 && SCALAR_INT_MODE_P (insn_op->mode)
909 && GET_MODE (op[arity]) != insn_op->mode)
910 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
911 insn_op->mode));
913 /* Wrap the expanded RTX for pointer types into a MEM expr with
914 the proper mode. This allows us to use e.g. (match_operand
915 "memory_operand"..) in the insn patterns instead of (mem
916 (match_operand "address_operand)). This is helpful for
917 patterns not just accepting MEMs. */
918 if (POINTER_TYPE_P (TREE_TYPE (arg))
919 && insn_op->predicate != address_operand)
920 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
922 /* Expand the modulo operation required on element selectors. */
923 if (op_flags == O_ELEM)
925 gcc_assert (last_vec_mode != VOIDmode);
926 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
927 op[arity],
928 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
929 NULL_RTX, 1, OPTAB_DIRECT);
932 /* Record the vector mode used for an element selector. This assumes:
933 1. There is no builtin with two different vector modes and an element selector
934 2. The element selector comes after the vector type it is referring to.
935 This is currently true for all the builtins, but FIXME: we
936 should better check for that. */
937 if (VECTOR_MODE_P (insn_op->mode))
938 last_vec_mode = insn_op->mode;
940 if (insn_op->predicate (op[arity], insn_op->mode))
942 arity++;
943 continue;
946 if (MEM_P (op[arity])
947 && insn_op->predicate == memory_operand
948 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
949 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
951 op[arity] = replace_equiv_address (op[arity],
952 copy_to_mode_reg (Pmode,
953 XEXP (op[arity], 0)));
955 /* Some of the builtins require different modes/types than the
956 pattern in order to implement a specific API. Instead of
957 adding many expanders which do the mode change we do it here.
958 E.g. s390_vec_add_u128, which is required to have vector unsigned
959 char arguments, is mapped to addti3. */
960 else if (insn_op->mode != VOIDmode
961 && GET_MODE (op[arity]) != VOIDmode
962 && GET_MODE (op[arity]) != insn_op->mode
963 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
964 GET_MODE (op[arity]), 0))
965 != NULL_RTX))
967 op[arity] = tmp_rtx;
969 else if (GET_MODE (op[arity]) == insn_op->mode
970 || GET_MODE (op[arity]) == VOIDmode
971 || (insn_op->predicate == address_operand
972 && GET_MODE (op[arity]) == Pmode))
974 /* An address_operand usually has VOIDmode in the expander
975 so we cannot use this. */
976 machine_mode target_mode =
977 (insn_op->predicate == address_operand
978 ? Pmode : insn_op->mode);
979 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
982 if (!insn_op->predicate (op[arity], insn_op->mode))
984 error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
985 return const0_rtx;
987 arity++;
990 switch (arity)
992 case 0:
993 pat = GEN_FCN (icode) (target);
994 break;
995 case 1:
996 if (nonvoid)
997 pat = GEN_FCN (icode) (target, op[0]);
998 else
999 pat = GEN_FCN (icode) (op[0]);
1000 break;
1001 case 2:
1002 if (nonvoid)
1003 pat = GEN_FCN (icode) (target, op[0], op[1]);
1004 else
1005 pat = GEN_FCN (icode) (op[0], op[1]);
1006 break;
1007 case 3:
1008 if (nonvoid)
1009 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1010 else
1011 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1012 break;
1013 case 4:
1014 if (nonvoid)
1015 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1016 else
1017 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1018 break;
1019 case 5:
1020 if (nonvoid)
1021 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1022 else
1023 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1024 break;
1025 case 6:
1026 if (nonvoid)
1027 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1028 else
1029 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1030 break;
1031 default:
1032 gcc_unreachable ();
1034 if (!pat)
1035 return NULL_RTX;
1036 emit_insn (pat);
1038 if (nonvoid)
1039 return target;
1040 else
1041 return const0_rtx;
1045 static const int s390_hotpatch_hw_max = 1000000;
1046 static int s390_hotpatch_hw_before_label = 0;
1047 static int s390_hotpatch_hw_after_label = 0;
1049 /* Check whether the hotpatch attribute is applied to a function and, if it has
1050 arguments, whether the arguments are valid. */
1052 static tree
1053 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1054 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1056 tree expr;
1057 tree expr2;
1058 int err;
1060 if (TREE_CODE (*node) != FUNCTION_DECL)
1062 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1063 name);
1064 *no_add_attrs = true;
1066 if (args != NULL && TREE_CHAIN (args) != NULL)
1068 expr = TREE_VALUE (args);
1069 expr2 = TREE_VALUE (TREE_CHAIN (args));
1071 if (args == NULL || TREE_CHAIN (args) == NULL)
1072 err = 1;
1073 else if (TREE_CODE (expr) != INTEGER_CST
1074 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1075 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1076 err = 1;
1077 else if (TREE_CODE (expr2) != INTEGER_CST
1078 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1079 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1080 err = 1;
1081 else
1082 err = 0;
1083 if (err)
1085 error ("requested %qE attribute is not a comma separated pair of"
1086 " non-negative integer constants or too large (max. %d)", name,
1087 s390_hotpatch_hw_max);
1088 *no_add_attrs = true;
1091 return NULL_TREE;
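/* Illustrative usage sketch (not part of the original source): the attribute
   validated above is written with two halfword counts, e.g.

     void callee (void) __attribute__ ((hotpatch (1, 2)));

   where both values must be non-negative and must not exceed
   s390_hotpatch_hw_max.  */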
1094 /* Expand the s390_vector_bool type attribute. */
1096 static tree
1097 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1098 tree args ATTRIBUTE_UNUSED,
1099 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1101 tree type = *node, result = NULL_TREE;
1102 machine_mode mode;
1104 while (POINTER_TYPE_P (type)
1105 || TREE_CODE (type) == FUNCTION_TYPE
1106 || TREE_CODE (type) == METHOD_TYPE
1107 || TREE_CODE (type) == ARRAY_TYPE)
1108 type = TREE_TYPE (type);
1110 mode = TYPE_MODE (type);
1111 switch (mode)
1113 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1114 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1115 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1116 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1117 default: break;
1120 *no_add_attrs = true; /* No need to hang on to the attribute. */
1122 if (result)
1123 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1125 return NULL_TREE;
1128 static const struct attribute_spec s390_attribute_table[] = {
1129 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1130 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1131 /* End element. */
1132 { NULL, 0, 0, false, false, false, NULL, false }
1135 /* Return the alignment for LABEL. We default to the -falign-labels
1136 value except for the literal pool base label. */
1137 int
1138 s390_label_align (rtx_insn *label)
1140 rtx_insn *prev_insn = prev_active_insn (label);
1141 rtx set, src;
1143 if (prev_insn == NULL_RTX)
1144 goto old;
1146 set = single_set (prev_insn);
1148 if (set == NULL_RTX)
1149 goto old;
1151 src = SET_SRC (set);
1153 /* Don't align literal pool base labels. */
1154 if (GET_CODE (src) == UNSPEC
1155 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1156 return 0;
1158 old:
1159 return align_labels_log;
1162 static machine_mode
1163 s390_libgcc_cmp_return_mode (void)
1165 return TARGET_64BIT ? DImode : SImode;
1168 static machine_mode
1169 s390_libgcc_shift_count_mode (void)
1171 return TARGET_64BIT ? DImode : SImode;
1174 static machine_mode
1175 s390_unwind_word_mode (void)
1177 return TARGET_64BIT ? DImode : SImode;
1180 /* Return true if the back end supports mode MODE. */
1181 static bool
1182 s390_scalar_mode_supported_p (machine_mode mode)
1184 /* In contrast to the default implementation reject TImode constants on 31bit
1185 TARGET_ZARCH for ABI compliance. */
1186 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1187 return false;
1189 if (DECIMAL_FLOAT_MODE_P (mode))
1190 return default_decimal_float_supported_p ();
1192 return default_scalar_mode_supported_p (mode);
1195 /* Return true if the back end supports vector mode MODE. */
1196 static bool
1197 s390_vector_mode_supported_p (machine_mode mode)
1199 machine_mode inner;
1201 if (!VECTOR_MODE_P (mode)
1202 || !TARGET_VX
1203 || GET_MODE_SIZE (mode) > 16)
1204 return false;
1206 inner = GET_MODE_INNER (mode);
1208 switch (inner)
1210 case QImode:
1211 case HImode:
1212 case SImode:
1213 case DImode:
1214 case TImode:
1215 case SFmode:
1216 case DFmode:
1217 case TFmode:
1218 return true;
1219 default:
1220 return false;
1224 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1226 void
1227 s390_set_has_landing_pad_p (bool value)
1229 cfun->machine->has_landing_pad_p = value;
1232 /* If two condition code modes are compatible, return a condition code
1233 mode which is compatible with both. Otherwise, return
1234 VOIDmode. */
1236 static machine_mode
1237 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1239 if (m1 == m2)
1240 return m1;
1242 switch (m1)
1244 case CCZmode:
1245 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1246 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1247 return m2;
1248 return VOIDmode;
1250 case CCSmode:
1251 case CCUmode:
1252 case CCTmode:
1253 case CCSRmode:
1254 case CCURmode:
1255 case CCZ1mode:
1256 if (m2 == CCZmode)
1257 return m1;
1259 return VOIDmode;
1261 default:
1262 return VOIDmode;
1264 return VOIDmode;
1267 /* Return true if SET either doesn't set the CC register, or else
1268 the source and destination have matching CC modes and that
1269 CC mode is at least as constrained as REQ_MODE. */
1271 static bool
1272 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1274 machine_mode set_mode;
1276 gcc_assert (GET_CODE (set) == SET);
1278 /* These modes are supposed to be used only in CC consumer
1279 patterns. */
1280 gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
1281 && req_mode != CCVFALLmode && req_mode != CCVFANYmode);
1283 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1284 return 1;
1286 set_mode = GET_MODE (SET_DEST (set));
1287 switch (set_mode)
1289 case CCSmode:
1290 case CCSRmode:
1291 case CCUmode:
1292 case CCURmode:
1293 case CCLmode:
1294 case CCL1mode:
1295 case CCL2mode:
1296 case CCL3mode:
1297 case CCT1mode:
1298 case CCT2mode:
1299 case CCT3mode:
1300 case CCVEQmode:
1301 case CCVIHmode:
1302 case CCVIHUmode:
1303 case CCVFHmode:
1304 case CCVFHEmode:
1305 if (req_mode != set_mode)
1306 return 0;
1307 break;
1309 case CCZmode:
1310 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1311 && req_mode != CCSRmode && req_mode != CCURmode)
1312 return 0;
1313 break;
1315 case CCAPmode:
1316 case CCANmode:
1317 if (req_mode != CCAmode)
1318 return 0;
1319 break;
1321 default:
1322 gcc_unreachable ();
1325 return (GET_MODE (SET_SRC (set)) == set_mode);
1328 /* Return true if every SET in INSN that sets the CC register
1329 has source and destination with matching CC modes and that
1330 CC mode is at least as constrained as REQ_MODE.
1331 If REQ_MODE is VOIDmode, always return false. */
1333 bool
1334 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1336 int i;
1338 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1339 if (req_mode == VOIDmode)
1340 return false;
1342 if (GET_CODE (PATTERN (insn)) == SET)
1343 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1345 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1346 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1348 rtx set = XVECEXP (PATTERN (insn), 0, i);
1349 if (GET_CODE (set) == SET)
1350 if (!s390_match_ccmode_set (set, req_mode))
1351 return false;
1354 return true;
1357 /* If a test-under-mask instruction can be used to implement
1358 (compare (and ... OP1) OP2), return the CC mode required
1359 to do that. Otherwise, return VOIDmode.
1360 MIXED is true if the instruction can distinguish between
1361 CC1 and CC2 for mixed selected bits (TMxx); it is false
1362 if the instruction cannot (TM). */
1364 machine_mode
1365 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1367 int bit0, bit1;
1369 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1370 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1371 return VOIDmode;
1373 /* Selected bits all zero: CC0.
1374 e.g.: int a; if ((a & (16 + 128)) == 0) */
1375 if (INTVAL (op2) == 0)
1376 return CCTmode;
1378 /* Selected bits all one: CC3.
1379 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1380 if (INTVAL (op2) == INTVAL (op1))
1381 return CCT3mode;
1383 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1384 int a;
1385 if ((a & (16 + 128)) == 16) -> CCT1
1386 if ((a & (16 + 128)) == 128) -> CCT2 */
1387 if (mixed)
1389 bit1 = exact_log2 (INTVAL (op2));
1390 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1391 if (bit0 != -1 && bit1 != -1)
1392 return bit0 > bit1 ? CCT1mode : CCT2mode;
1395 return VOIDmode;
1398 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1399 OP0 and OP1 of a COMPARE, return the mode to be used for the
1400 comparison. */
1402 machine_mode
1403 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1405 if (TARGET_VX
1406 && register_operand (op0, DFmode)
1407 && register_operand (op1, DFmode))
1409 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1410 s390_emit_compare or s390_canonicalize_comparison will take
1411 care of it. */
1412 switch (code)
1414 case EQ:
1415 case NE:
1416 return CCVEQmode;
1417 case GT:
1418 case UNLE:
1419 return CCVFHmode;
1420 case GE:
1421 case UNLT:
1422 return CCVFHEmode;
1423 default:
1428 switch (code)
1430 case EQ:
1431 case NE:
1432 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1433 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1434 return CCAPmode;
1435 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1436 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1437 return CCAPmode;
1438 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1439 || GET_CODE (op1) == NEG)
1440 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1441 return CCLmode;
1443 if (GET_CODE (op0) == AND)
1445 /* Check whether we can potentially do it via TM. */
1446 machine_mode ccmode;
1447 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1448 if (ccmode != VOIDmode)
1450 /* Relax CCTmode to CCZmode to allow fall-back to AND
1451 if that turns out to be beneficial. */
1452 return ccmode == CCTmode ? CCZmode : ccmode;
1456 if (register_operand (op0, HImode)
1457 && GET_CODE (op1) == CONST_INT
1458 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1459 return CCT3mode;
1460 if (register_operand (op0, QImode)
1461 && GET_CODE (op1) == CONST_INT
1462 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1463 return CCT3mode;
1465 return CCZmode;
1467 case LE:
1468 case LT:
1469 case GE:
1470 case GT:
1471 /* The only overflow condition of NEG and ABS happens when
1472 INT_MIN is used as the parameter: the result stays negative. So
1473 we have an overflow from a positive value to a negative.
1474 Using CCAP mode the resulting cc can be used for comparisons. */
1475 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1476 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1477 return CCAPmode;
1479 /* If constants are involved in an add instruction it is possible to use
1480 the resulting cc for comparisons with zero. Knowing the sign of the
1481 constant the overflow behavior gets predictable. e.g.:
1482 int a, b; if ((b = a + c) > 0)
1483 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1484 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1485 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1486 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1487 /* Avoid INT32_MIN on 32 bit. */
1488 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1490 if (INTVAL (XEXP((op0), 1)) < 0)
1491 return CCANmode;
1492 else
1493 return CCAPmode;
1495 /* Fall through. */
1496 case UNORDERED:
1497 case ORDERED:
1498 case UNEQ:
1499 case UNLE:
1500 case UNLT:
1501 case UNGE:
1502 case UNGT:
1503 case LTGT:
1504 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1505 && GET_CODE (op1) != CONST_INT)
1506 return CCSRmode;
1507 return CCSmode;
1509 case LTU:
1510 case GEU:
1511 if (GET_CODE (op0) == PLUS
1512 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1513 return CCL1mode;
1515 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1516 && GET_CODE (op1) != CONST_INT)
1517 return CCURmode;
1518 return CCUmode;
1520 case LEU:
1521 case GTU:
1522 if (GET_CODE (op0) == MINUS
1523 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1524 return CCL2mode;
1526 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1527 && GET_CODE (op1) != CONST_INT)
1528 return CCURmode;
1529 return CCUmode;
1531 default:
1532 gcc_unreachable ();
1536 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1537 that we can implement more efficiently. */
1539 static void
1540 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1541 bool op0_preserve_value)
1543 if (op0_preserve_value)
1544 return;
1546 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1547 if ((*code == EQ || *code == NE)
1548 && *op1 == const0_rtx
1549 && GET_CODE (*op0) == ZERO_EXTRACT
1550 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1551 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1552 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1554 rtx inner = XEXP (*op0, 0);
1555 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1556 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1557 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1559 if (len > 0 && len < modesize
1560 && pos >= 0 && pos + len <= modesize
1561 && modesize <= HOST_BITS_PER_WIDE_INT)
1563 unsigned HOST_WIDE_INT block;
1564 block = (HOST_WIDE_INT_1U << len) - 1;
1565 block <<= modesize - pos - len;
1567 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1568 gen_int_mode (block, GET_MODE (inner)));
1572 /* Narrow AND of memory against immediate to enable TM. */
1573 if ((*code == EQ || *code == NE)
1574 && *op1 == const0_rtx
1575 && GET_CODE (*op0) == AND
1576 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1577 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1579 rtx inner = XEXP (*op0, 0);
1580 rtx mask = XEXP (*op0, 1);
1582 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1583 if (GET_CODE (inner) == SUBREG
1584 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1585 && (GET_MODE_SIZE (GET_MODE (inner))
1586 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1587 && ((INTVAL (mask)
1588 & GET_MODE_MASK (GET_MODE (inner))
1589 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1590 == 0))
1591 inner = SUBREG_REG (inner);
1593 /* Do not change volatile MEMs. */
1594 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1596 int part = s390_single_part (XEXP (*op0, 1),
1597 GET_MODE (inner), QImode, 0);
1598 if (part >= 0)
1600 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1601 inner = adjust_address_nv (inner, QImode, part);
1602 *op0 = gen_rtx_AND (QImode, inner, mask);
1607 /* Narrow comparisons against 0xffff to HImode if possible. */
1608 if ((*code == EQ || *code == NE)
1609 && GET_CODE (*op1) == CONST_INT
1610 && INTVAL (*op1) == 0xffff
1611 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1612 && (nonzero_bits (*op0, GET_MODE (*op0))
1613 & ~HOST_WIDE_INT_UC (0xffff)) == 0)
1615 *op0 = gen_lowpart (HImode, *op0);
1616 *op1 = constm1_rtx;
1619 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1620 if (GET_CODE (*op0) == UNSPEC
1621 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1622 && XVECLEN (*op0, 0) == 1
1623 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1624 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1625 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1626 && *op1 == const0_rtx)
1628 enum rtx_code new_code = UNKNOWN;
1629 switch (*code)
1631 case EQ: new_code = EQ; break;
1632 case NE: new_code = NE; break;
1633 case LT: new_code = GTU; break;
1634 case GT: new_code = LTU; break;
1635 case LE: new_code = GEU; break;
1636 case GE: new_code = LEU; break;
1637 default: break;
1640 if (new_code != UNKNOWN)
1642 *op0 = XVECEXP (*op0, 0, 0);
1643 *code = new_code;
1647 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1648 if (GET_CODE (*op0) == UNSPEC
1649 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1650 && XVECLEN (*op0, 0) == 1
1651 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1652 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1653 && CONST_INT_P (*op1))
1655 enum rtx_code new_code = UNKNOWN;
1656 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1658 case CCZmode:
1659 case CCRAWmode:
1660 switch (*code)
1662 case EQ: new_code = EQ; break;
1663 case NE: new_code = NE; break;
1664 default: break;
1666 break;
1667 default: break;
1670 if (new_code != UNKNOWN)
1672 /* For CCRAWmode put the required cc mask into the second
1673 operand. */
1674 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1675 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1676 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1677 *op0 = XVECEXP (*op0, 0, 0);
1678 *code = new_code;
1682 /* Simplify cascaded EQ, NE with const0_rtx. */
1683 if ((*code == NE || *code == EQ)
1684 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1685 && GET_MODE (*op0) == SImode
1686 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1687 && REG_P (XEXP (*op0, 0))
1688 && XEXP (*op0, 1) == const0_rtx
1689 && *op1 == const0_rtx)
1691 if ((*code == EQ && GET_CODE (*op0) == NE)
1692 || (*code == NE && GET_CODE (*op0) == EQ))
1693 *code = EQ;
1694 else
1695 *code = NE;
1696 *op0 = XEXP (*op0, 0);
1699 /* Prefer register over memory as first operand. */
1700 if (MEM_P (*op0) && REG_P (*op1))
1702 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1703 *code = (int)swap_condition ((enum rtx_code)*code);
1706 /* Using the scalar variants of vector instructions for 64 bit FP
1707 comparisons might require swapping the operands. */
1708 if (TARGET_VX
1709 && register_operand (*op0, DFmode)
1710 && register_operand (*op1, DFmode)
1711 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1713 rtx tmp;
1715 switch (*code)
1717 case LT: *code = GT; break;
1718 case LE: *code = GE; break;
1719 case UNGT: *code = UNLE; break;
1720 case UNGE: *code = UNLT; break;
1721 default: ;
1723 tmp = *op0; *op0 = *op1; *op1 = tmp;
1726 /* A comparison result is compared against zero. Replace it with
1727 the (perhaps inverted) original comparison.
1728 This probably should be done by simplify_relational_operation. */
1729 if ((*code == EQ || *code == NE)
1730 && *op1 == const0_rtx
1731 && COMPARISON_P (*op0)
1732 && CC_REG_P (XEXP (*op0, 0)))
1734 enum rtx_code new_code;
1736 if (*code == EQ)
1737 new_code = reversed_comparison_code_parts (GET_CODE (*op0),
1738 XEXP (*op0, 0),
1739 XEXP (*op1, 0), NULL);
1740 else
1741 new_code = GET_CODE (*op0);
1743 if (new_code != UNKNOWN)
1745 *code = new_code;
1746 *op1 = XEXP (*op0, 1);
1747 *op0 = XEXP (*op0, 0);
1752 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1753 FP compare using the single element variant of vector instructions.
1754 Replace CODE with the comparison code to be used in the CC reg
1755 compare and return the condition code register RTX in CC. */
1757 static bool
1758 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1759 rtx *cc)
1761 machine_mode cmp_mode;
1762 bool swap_p = false;
1764 switch (*code)
1766 case EQ: cmp_mode = CCVEQmode; break;
1767 case NE: cmp_mode = CCVEQmode; break;
1768 case GT: cmp_mode = CCVFHmode; break;
1769 case GE: cmp_mode = CCVFHEmode; break;
1770 case UNLE: cmp_mode = CCVFHmode; break;
1771 case UNLT: cmp_mode = CCVFHEmode; break;
1772 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1773 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1774 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1775 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1776 default: return false;
1779 if (swap_p)
1781 rtx tmp = cmp2;
1782 cmp2 = cmp1;
1783 cmp1 = tmp;
1786 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1787 gen_rtvec (2,
1788 gen_rtx_SET (gen_rtx_REG (cmp_mode, CC_REGNUM),
1789 gen_rtx_COMPARE (cmp_mode, cmp1,
1790 cmp2)),
1791 gen_rtx_CLOBBER (VOIDmode,
1792 gen_rtx_SCRATCH (V2DImode)))));
1794 /* This is the cc reg as it will be used by the cc mode consumer.
1795 It either needs to be CCVFALL or CCVFANY. However, CC1 will
1796 never be set by the scalar variants. So it actually doesn't
1797 matter which one we choose here. */
1798 *cc = gen_rtx_REG (CCVFALLmode, CC_REGNUM);
1799 return true;
1803 /* Emit a compare instruction suitable to implement the comparison
1804 OP0 CODE OP1. Return the correct condition RTL to be placed in
1805 the IF_THEN_ELSE of the conditional branch testing the result. */
1807 rtx
1808 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1810 machine_mode mode = s390_select_ccmode (code, op0, op1);
1811 rtx cc;
1813 if (TARGET_VX
1814 && register_operand (op0, DFmode)
1815 && register_operand (op1, DFmode)
1816 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1818 /* Work has been done by s390_expand_vec_compare_scalar already. */
1820 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1822 /* Do not output a redundant compare instruction if a
1823 compare_and_swap pattern already computed the result and the
1824 machine modes are compatible. */
1825 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1826 == GET_MODE (op0));
1827 cc = op0;
1829 else
1831 cc = gen_rtx_REG (mode, CC_REGNUM);
1832 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1835 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
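/* Illustrative note (not part of the original source): the rtx returned
   above typically becomes the condition of a conditional branch, i.e.

     (set (pc) (if_then_else (CODE cc (const_int 0))
                             (label_ref TARGET) (pc)))

   which is what s390_emit_jump below constructs when COND is non-NULL.  */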
1838 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1839 matches CMP.
1840 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1841 conditional branch testing the result. */
1843 static rtx
1844 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1845 rtx cmp, rtx new_rtx)
1847 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1848 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1849 const0_rtx);
1852 /* Emit a jump instruction to TARGET and return it. If COND is
1853 NULL_RTX, emit an unconditional jump, else a conditional jump under
1854 condition COND. */
1856 rtx_insn *
1857 s390_emit_jump (rtx target, rtx cond)
1859 rtx insn;
1861 target = gen_rtx_LABEL_REF (VOIDmode, target);
1862 if (cond)
1863 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1865 insn = gen_rtx_SET (pc_rtx, target);
1866 return emit_jump_insn (insn);
1869 /* Return branch condition mask to implement a branch
1870 specified by CODE. Return -1 for invalid comparisons. */
1872 static int
1873 s390_branch_condition_mask (rtx code)
1875 const int CC0 = 1 << 3;
1876 const int CC1 = 1 << 2;
1877 const int CC2 = 1 << 1;
1878 const int CC3 = 1 << 0;
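/* Illustrative note (not part of the original source): with these
   definitions a returned mask such as CC0 | CC1 == 12 corresponds directly
   to the 4-bit condition mask used by the branch-on-condition instructions
   (mask value 8 tests CC 0, 4 tests CC 1, 2 tests CC 2, 1 tests CC 3).  */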
1880 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1881 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1882 gcc_assert (XEXP (code, 1) == const0_rtx
1883 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1884 && CONST_INT_P (XEXP (code, 1))));
1887 switch (GET_MODE (XEXP (code, 0)))
1889 case CCZmode:
1890 case CCZ1mode:
1891 switch (GET_CODE (code))
1893 case EQ: return CC0;
1894 case NE: return CC1 | CC2 | CC3;
1895 default: return -1;
1897 break;
1899 case CCT1mode:
1900 switch (GET_CODE (code))
1902 case EQ: return CC1;
1903 case NE: return CC0 | CC2 | CC3;
1904 default: return -1;
1906 break;
1908 case CCT2mode:
1909 switch (GET_CODE (code))
1911 case EQ: return CC2;
1912 case NE: return CC0 | CC1 | CC3;
1913 default: return -1;
1915 break;
1917 case CCT3mode:
1918 switch (GET_CODE (code))
1920 case EQ: return CC3;
1921 case NE: return CC0 | CC1 | CC2;
1922 default: return -1;
1924 break;
1926 case CCLmode:
1927 switch (GET_CODE (code))
1929 case EQ: return CC0 | CC2;
1930 case NE: return CC1 | CC3;
1931 default: return -1;
1933 break;
1935 case CCL1mode:
1936 switch (GET_CODE (code))
1938 case LTU: return CC2 | CC3; /* carry */
1939 case GEU: return CC0 | CC1; /* no carry */
1940 default: return -1;
1942 break;
1944 case CCL2mode:
1945 switch (GET_CODE (code))
1947 case GTU: return CC0 | CC1; /* borrow */
1948 case LEU: return CC2 | CC3; /* no borrow */
1949 default: return -1;
1951 break;
1953 case CCL3mode:
1954 switch (GET_CODE (code))
1956 case EQ: return CC0 | CC2;
1957 case NE: return CC1 | CC3;
1958 case LTU: return CC1;
1959 case GTU: return CC3;
1960 case LEU: return CC1 | CC2;
1961 case GEU: return CC2 | CC3;
1962 default: return -1;
1965 case CCUmode:
1966 switch (GET_CODE (code))
1968 case EQ: return CC0;
1969 case NE: return CC1 | CC2 | CC3;
1970 case LTU: return CC1;
1971 case GTU: return CC2;
1972 case LEU: return CC0 | CC1;
1973 case GEU: return CC0 | CC2;
1974 default: return -1;
1976 break;
1978 case CCURmode:
1979 switch (GET_CODE (code))
1981 case EQ: return CC0;
1982 case NE: return CC2 | CC1 | CC3;
1983 case LTU: return CC2;
1984 case GTU: return CC1;
1985 case LEU: return CC0 | CC2;
1986 case GEU: return CC0 | CC1;
1987 default: return -1;
1989 break;
1991 case CCAPmode:
1992 switch (GET_CODE (code))
1994 case EQ: return CC0;
1995 case NE: return CC1 | CC2 | CC3;
1996 case LT: return CC1 | CC3;
1997 case GT: return CC2;
1998 case LE: return CC0 | CC1 | CC3;
1999 case GE: return CC0 | CC2;
2000 default: return -1;
2002 break;
2004 case CCANmode:
2005 switch (GET_CODE (code))
2007 case EQ: return CC0;
2008 case NE: return CC1 | CC2 | CC3;
2009 case LT: return CC1;
2010 case GT: return CC2 | CC3;
2011 case LE: return CC0 | CC1;
2012 case GE: return CC0 | CC2 | CC3;
2013 default: return -1;
2015 break;
2017 case CCSmode:
2018 switch (GET_CODE (code))
2020 case EQ: return CC0;
2021 case NE: return CC1 | CC2 | CC3;
2022 case LT: return CC1;
2023 case GT: return CC2;
2024 case LE: return CC0 | CC1;
2025 case GE: return CC0 | CC2;
2026 case UNORDERED: return CC3;
2027 case ORDERED: return CC0 | CC1 | CC2;
2028 case UNEQ: return CC0 | CC3;
2029 case UNLT: return CC1 | CC3;
2030 case UNGT: return CC2 | CC3;
2031 case UNLE: return CC0 | CC1 | CC3;
2032 case UNGE: return CC0 | CC2 | CC3;
2033 case LTGT: return CC1 | CC2;
2034 default: return -1;
2036 break;
2038 case CCSRmode:
2039 switch (GET_CODE (code))
2041 case EQ: return CC0;
2042 case NE: return CC2 | CC1 | CC3;
2043 case LT: return CC2;
2044 case GT: return CC1;
2045 case LE: return CC0 | CC2;
2046 case GE: return CC0 | CC1;
2047 case UNORDERED: return CC3;
2048 case ORDERED: return CC0 | CC2 | CC1;
2049 case UNEQ: return CC0 | CC3;
2050 case UNLT: return CC2 | CC3;
2051 case UNGT: return CC1 | CC3;
2052 case UNLE: return CC0 | CC2 | CC3;
2053 case UNGE: return CC0 | CC1 | CC3;
2054 case LTGT: return CC2 | CC1;
2055 default: return -1;
2057 break;
2059 /* Vector comparison modes. */
2060 /* CC2 will never be set. It is, however, part of the negated
2061 masks. */
2062 case CCVIALLmode:
2063 switch (GET_CODE (code))
2065 case EQ:
2066 case GTU:
2067 case GT:
2068 case GE: return CC0;
2069 /* The inverted modes are in fact *any* modes. */
2070 case NE:
2071 case LEU:
2072 case LE:
2073 case LT: return CC3 | CC1 | CC2;
2074 default: return -1;
2077 case CCVIANYmode:
2078 switch (GET_CODE (code))
2080 case EQ:
2081 case GTU:
2082 case GT:
2083 case GE: return CC0 | CC1;
2084 /* The inverted modes are in fact *all* modes. */
2085 case NE:
2086 case LEU:
2087 case LE:
2088 case LT: return CC3 | CC2;
2089 default: return -1;
2091 case CCVFALLmode:
2092 switch (GET_CODE (code))
2094 case EQ:
2095 case GT:
2096 case GE: return CC0;
2097 /* The inverted modes are in fact *any* modes. */
2098 case NE:
2099 case UNLE:
2100 case UNLT: return CC3 | CC1 | CC2;
2101 default: return -1;
2104 case CCVFANYmode:
2105 switch (GET_CODE (code))
2107 case EQ:
2108 case GT:
2109 case GE: return CC0 | CC1;
2110 /* The inverted modes are in fact *all* modes. */
2111 case NE:
2112 case UNLE:
2113 case UNLT: return CC3 | CC2;
2114 default: return -1;
2117 case CCRAWmode:
2118 switch (GET_CODE (code))
2120 case EQ:
2121 return INTVAL (XEXP (code, 1));
2122 case NE:
2123 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2124 default:
2125 gcc_unreachable ();
2128 default:
2129 return -1;
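/* For illustration (a hand-worked example): with the encoding above,
   CC0 = 8, CC1 = 4, CC2 = 2 and CC3 = 1, so a comparison
   (eq (reg:CCZ CC_REGNUM) (const_int 0)) yields the mask 0b1000 = 8,
   while (ne ...) yields CC1 | CC2 | CC3 = 0b0111 = 7.  */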
2134 /* Return branch condition mask to implement a compare and branch
2135 specified by CODE. Return -1 for invalid comparisons. */
2138 s390_compare_and_branch_condition_mask (rtx code)
2140 const int CC0 = 1 << 3;
2141 const int CC1 = 1 << 2;
2142 const int CC2 = 1 << 1;
2144 switch (GET_CODE (code))
2146 case EQ:
2147 return CC0;
2148 case NE:
2149 return CC1 | CC2;
2150 case LT:
2151 case LTU:
2152 return CC1;
2153 case GT:
2154 case GTU:
2155 return CC2;
2156 case LE:
2157 case LEU:
2158 return CC0 | CC1;
2159 case GE:
2160 case GEU:
2161 return CC0 | CC2;
2162 default:
2163 gcc_unreachable ();
2165 return -1;
2168 /* If INV is false, return assembler mnemonic string to implement
2169 a branch specified by CODE. If INV is true, return mnemonic
2170 for the corresponding inverted branch. */
2172 static const char *
2173 s390_branch_condition_mnemonic (rtx code, int inv)
2175 int mask;
2177 static const char *const mnemonic[16] =
2179 NULL, "o", "h", "nle",
2180 "l", "nhe", "lh", "ne",
2181 "e", "nlh", "he", "nl",
2182 "le", "nh", "no", NULL
2185 if (GET_CODE (XEXP (code, 0)) == REG
2186 && REGNO (XEXP (code, 0)) == CC_REGNUM
2187 && (XEXP (code, 1) == const0_rtx
2188 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2189 && CONST_INT_P (XEXP (code, 1)))))
2190 mask = s390_branch_condition_mask (code);
2191 else
2192 mask = s390_compare_and_branch_condition_mask (code);
2194 gcc_assert (mask >= 0);
2196 if (inv)
2197 mask ^= 15;
2199 gcc_assert (mask >= 1 && mask <= 14);
2201 return mnemonic[mask];
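/* For illustration: an EQ test on CCZmode gives mask 8 and thus the
   mnemonic "e"; with INV set the mask becomes 8 ^ 15 = 7, i.e. "ne".  */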
2204 /* Return the part of OP which has a value different from DEF.
2205 The size of the part is determined by MODE.
2206 Use this function only if you already know that OP really
2207 contains such a part. */
2209 unsigned HOST_WIDE_INT
2210 s390_extract_part (rtx op, machine_mode mode, int def)
2212 unsigned HOST_WIDE_INT value = 0;
2213 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2214 int part_bits = GET_MODE_BITSIZE (mode);
2215 unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
2216 int i;
2218 for (i = 0; i < max_parts; i++)
2220 if (i == 0)
2221 value = UINTVAL (op);
2222 else
2223 value >>= part_bits;
2225 if ((value & part_mask) != (def & part_mask))
2226 return value & part_mask;
2229 gcc_unreachable ();
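/* For illustration (a hand-worked example): assuming 64-bit
   HOST_WIDE_INT, s390_extract_part (GEN_INT (0x4321000000000000),
   HImode, 0) scans the 16-bit chunks starting at the low end and
   returns 0x4321, the only chunk that differs from 0.  */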
2232 /* If OP is an integer constant of mode MODE with exactly one
2233 part of mode PART_MODE unequal to DEF, return the number of that
2234 part. Otherwise, return -1. */
2237 s390_single_part (rtx op,
2238 machine_mode mode,
2239 machine_mode part_mode,
2240 int def)
2242 unsigned HOST_WIDE_INT value = 0;
2243 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2244 unsigned HOST_WIDE_INT part_mask
2245 = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
2246 int i, part = -1;
2248 if (GET_CODE (op) != CONST_INT)
2249 return -1;
2251 for (i = 0; i < n_parts; i++)
2253 if (i == 0)
2254 value = UINTVAL (op);
2255 else
2256 value >>= GET_MODE_BITSIZE (part_mode);
2258 if ((value & part_mask) != (def & part_mask))
2260 if (part != -1)
2261 return -1;
2262 else
2263 part = i;
2266 return part == -1 ? -1 : n_parts - 1 - part;
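/* For illustration: parts are numbered starting with the most
   significant one, so s390_single_part (GEN_INT (0x0000ffff00000000),
   DImode, HImode, 0) returns 1, while a value with two chunks
   differing from DEF, e.g. 0xffff00000000ffff, yields -1.  */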
2269 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2270 bits and no other bits are set in (the lower SIZE bits of) IN.
2272 PSTART and PEND can be used to obtain the start and end
2273 position (inclusive) of the bitfield relative to 64
2274 bits. *PSTART / *PEND gives the position of the first/last bit
2275 of the bitfield counting from the highest order bit starting
2276 with zero. */
2278 bool
2279 s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2280 int *pstart, int *pend)
2282 int start;
2283 int end = -1;
2284 int lowbit = HOST_BITS_PER_WIDE_INT - 1;
2285 int highbit = HOST_BITS_PER_WIDE_INT - size;
2286 unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;
2288 gcc_assert (!!pstart == !!pend);
2289 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2290 if (end == -1)
2292 /* Look for the rightmost bit of a contiguous range of ones. */
2293 if (bitmask & in)
2294 /* Found it. */
2295 end = start;
2297 else
2299 /* Look for the first zero bit after the range of ones. */
2300 if (! (bitmask & in))
2301 /* Found it. */
2302 break;
2304 /* We're one past the last one-bit. */
2305 start++;
2307 if (end == -1)
2308 /* No one bits found. */
2309 return false;
2311 if (start > highbit)
2313 unsigned HOST_WIDE_INT mask;
2315 /* Calculate a mask for all bits beyond the contiguous bits. */
2316 mask = ((~HOST_WIDE_INT_0U >> highbit)
2317 & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));
2318 if (mask & in)
2319 /* There are more bits set beyond the first range of one bits. */
2320 return false;
2323 if (pstart)
2325 *pstart = start;
2326 *pend = end;
2329 return true;
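/* For illustration (hand-worked): s390_contiguous_bitmask_nowrap_p
   (0xff0, 32, &s, &e) returns true with s = 52 and e = 59, i.e. the
   eight one-bits occupy positions 52..59 counting from the most
   significant of 64 bits, whereas 0xf0f is rejected because its
   one-bits are not contiguous.  */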
2332 /* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2333 if ~IN contains a contiguous bitfield. In that case, *END is <
2334 *START.
2336 If WRAP_P is true, a bitmask that wraps around is also tested.
2337 When a wraparound occurs, *START is greater than *END (for
2338 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2339 part of the range. If WRAP_P is false, no wraparound is
2340 tested. */
2342 bool
2343 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2344 int size, int *start, int *end)
2346 int bs = HOST_BITS_PER_WIDE_INT;
2347 bool b;
2349 gcc_assert (!!start == !!end);
2350 if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
2351 /* This cannot be expressed as a contiguous bitmask. Exit early because
2352 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2353 a valid bitmask. */
2354 return false;
2355 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2356 if (b)
2357 return true;
2358 if (! wrap_p)
2359 return false;
2360 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2361 if (b && start)
2363 int s = *start;
2364 int e = *end;
2366 gcc_assert (s >= 1);
2367 *start = ((e + 1) & (bs - 1));
2368 *end = ((s - 1 + bs) & (bs - 1));
2371 return b;
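/* For illustration (hand-worked): 0xc000000000000001 is not contiguous
   by itself, but s390_contiguous_bitmask_p (0xc000000000000001, true,
   64, &s, &e) succeeds with s = 63 and e = 1; the field wraps around
   from bit 63 through bit 0 to bit 1 (positions counted from the most
   significant bit), which is why *START > *END signals a wraparound.  */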
2374 /* Return true if OP contains the same contiguous bitfield in *all*
2375 its elements. START and END can be used to obtain the start and
2376 end position of the bitfield.
2378 START/STOP give the position of the first/last bit of the bitfield
2379 counting from the lowest order bit starting with zero. In order to
2380 use these values for S/390 instructions this has to be converted to
2381 "bits big endian" style. */
2383 bool
2384 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2386 unsigned HOST_WIDE_INT mask;
2387 int size;
2388 rtx elt;
2389 bool b;
2391 gcc_assert (!!start == !!end);
2392 if (!const_vec_duplicate_p (op, &elt)
2393 || !CONST_INT_P (elt))
2394 return false;
2396 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2398 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2399 if (size > 64)
2400 return false;
2402 mask = UINTVAL (elt);
2404 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2405 if (b)
2407 if (start)
2409 *start -= (HOST_BITS_PER_WIDE_INT - size);
2410 *end -= (HOST_BITS_PER_WIDE_INT - size);
2412 return true;
2414 else
2415 return false;
2418 /* Return true if OP consists only of byte chunks that are either 0 or
2419 0xff. If MASK is non-NULL, a byte mask is generated which is
2420 appropriate for the vector generate byte mask instruction. */
2422 bool
2423 s390_bytemask_vector_p (rtx op, unsigned *mask)
2425 int i;
2426 unsigned tmp_mask = 0;
2427 int nunit, unit_size;
2429 if (!VECTOR_MODE_P (GET_MODE (op))
2430 || GET_CODE (op) != CONST_VECTOR
2431 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2432 return false;
2434 nunit = GET_MODE_NUNITS (GET_MODE (op));
2435 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2437 for (i = 0; i < nunit; i++)
2439 unsigned HOST_WIDE_INT c;
2440 int j;
2442 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2443 return false;
2445 c = UINTVAL (XVECEXP (op, 0, i));
2446 for (j = 0; j < unit_size; j++)
2448 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2449 return false;
2450 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2451 c = c >> BITS_PER_UNIT;
2455 if (mask != NULL)
2456 *mask = tmp_mask;
2458 return true;
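/* For illustration: a V2DImode constant { -1, 0 } consists only of
   0xff and 0x00 bytes, so this returns true with *MASK = 0xff00;
   element 0 supplies the most significant eight mask bits, matching
   the byte order expected by the vector generate byte mask
   instruction.  */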
2461 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2462 equivalent to a shift followed by the AND. In particular, CONTIG
2463 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2464 for ROTL indicate a rotate to the right. */
2466 bool
2467 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2469 int start, end;
2470 bool ok;
2472 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2473 gcc_assert (ok);
2475 if (rotl >= 0)
2476 return (64 - end >= rotl);
2477 else
2479 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2480 DImode. */
2481 rotl = -rotl + (64 - bitsize);
2482 return (start >= rotl);
2486 /* Check whether we can (and want to) split a double-word
2487 move in mode MODE from SRC to DST into two single-word
2488 moves, moving the subword FIRST_SUBWORD first. */
2490 bool
2491 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2493 /* Floating point and vector registers cannot be split. */
2494 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2495 return false;
2497 /* We don't need to split if operands are directly accessible. */
2498 if (s_operand (src, mode) || s_operand (dst, mode))
2499 return false;
2501 /* Non-offsettable memory references cannot be split. */
2502 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2503 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2504 return false;
2506 /* Moving the first subword must not clobber a register
2507 needed to move the second subword. */
2508 if (register_operand (dst, mode))
2510 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2511 if (reg_overlap_mentioned_p (subreg, src))
2512 return false;
2515 return true;
2518 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2519 and [MEM2, MEM2 + SIZE] do overlap and false
2520 otherwise. */
2522 bool
2523 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2525 rtx addr1, addr2, addr_delta;
2526 HOST_WIDE_INT delta;
2528 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2529 return true;
2531 if (size == 0)
2532 return false;
2534 addr1 = XEXP (mem1, 0);
2535 addr2 = XEXP (mem2, 0);
2537 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2539 /* This overlapping check is used by peepholes merging memory block operations.
2540 Overlapping operations would otherwise be recognized by the S/390 hardware
2541 and would fall back to a slower implementation. Allowing overlapping
2542 operations would lead to slow code but not to wrong code. Therefore we are
2543 somewhat optimistic if we cannot prove that the memory blocks are
2544 overlapping.
2545 That's why we return false here although this may accept operations on
2546 overlapping memory areas. */
2547 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2548 return false;
2550 delta = INTVAL (addr_delta);
2552 if (delta == 0
2553 || (delta > 0 && delta < size)
2554 || (delta < 0 && -delta < size))
2555 return true;
2557 return false;
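/* For illustration: with addresses (reg R) and (plus (reg R)
   (const_int 8)) the difference simplifies to the constant 8, so for
   SIZE = 8 the blocks are provably disjoint and we return false,
   while for SIZE = 16 they provably overlap and we return true.  */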
2560 /* Check whether the address of memory reference MEM2 equals exactly
2561 the address of memory reference MEM1 plus DELTA. Return true if
2562 we can prove this to be the case, false otherwise. */
2564 bool
2565 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2567 rtx addr1, addr2, addr_delta;
2569 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2570 return false;
2572 addr1 = XEXP (mem1, 0);
2573 addr2 = XEXP (mem2, 0);
2575 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2576 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2577 return false;
2579 return true;
2582 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2584 void
2585 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2586 rtx *operands)
2588 machine_mode wmode = mode;
2589 rtx dst = operands[0];
2590 rtx src1 = operands[1];
2591 rtx src2 = operands[2];
2592 rtx op, clob, tem;
2594 /* If we cannot handle the operation directly, use a temp register. */
2595 if (!s390_logical_operator_ok_p (operands))
2596 dst = gen_reg_rtx (mode);
2598 /* QImode and HImode patterns make sense only if we have a destination
2599 in memory. Otherwise perform the operation in SImode. */
2600 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2601 wmode = SImode;
2603 /* Widen operands if required. */
2604 if (mode != wmode)
2606 if (GET_CODE (dst) == SUBREG
2607 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2608 dst = tem;
2609 else if (REG_P (dst))
2610 dst = gen_rtx_SUBREG (wmode, dst, 0);
2611 else
2612 dst = gen_reg_rtx (wmode);
2614 if (GET_CODE (src1) == SUBREG
2615 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2616 src1 = tem;
2617 else if (GET_MODE (src1) != VOIDmode)
2618 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2620 if (GET_CODE (src2) == SUBREG
2621 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2622 src2 = tem;
2623 else if (GET_MODE (src2) != VOIDmode)
2624 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2627 /* Emit the instruction. */
2628 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2629 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2630 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2632 /* Fix up the destination if needed. */
2633 if (dst != operands[0])
2634 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2637 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2639 bool
2640 s390_logical_operator_ok_p (rtx *operands)
2642 /* If the destination operand is in memory, it needs to coincide
2643 with one of the source operands. After reload, it has to be
2644 the first source operand. */
2645 if (GET_CODE (operands[0]) == MEM)
2646 return rtx_equal_p (operands[0], operands[1])
2647 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2649 return true;
2652 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2653 operand IMMOP to switch from SS to SI type instructions. */
2655 void
2656 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2658 int def = code == AND ? -1 : 0;
2659 HOST_WIDE_INT mask;
2660 int part;
2662 gcc_assert (GET_CODE (*memop) == MEM);
2663 gcc_assert (!MEM_VOLATILE_P (*memop));
2665 mask = s390_extract_part (*immop, QImode, def);
2666 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2667 gcc_assert (part >= 0);
2669 *memop = adjust_address (*memop, QImode, part);
2670 *immop = gen_int_mode (mask, QImode);
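/* For illustration (hand-worked): an AND of a HImode memory operand
   with the constant 0x00ff has DEF = -1; the only byte differing from
   0xff is the high byte (part 0), so *MEMOP is narrowed to that
   QImode byte and *IMMOP becomes 0x00, turning the operation into a
   single byte-sized and-immediate (NI).  */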
2674 /* How to allocate a 'struct machine_function'. */
2676 static struct machine_function *
2677 s390_init_machine_status (void)
2679 return ggc_cleared_alloc<machine_function> ();
2682 /* Map for smallest class containing reg regno. */
2684 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2685 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2686 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2687 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2688 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2689 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2690 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2691 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2692 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2693 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2694 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2695 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2696 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2697 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2698 VEC_REGS, VEC_REGS /* 52 */
2701 /* Return attribute type of insn. */
2703 static enum attr_type
2704 s390_safe_attr_type (rtx_insn *insn)
2706 if (recog_memoized (insn) >= 0)
2707 return get_attr_type (insn);
2708 else
2709 return TYPE_NONE;
2712 /* Return true if DISP is a valid short displacement. */
2714 static bool
2715 s390_short_displacement (rtx disp)
2717 /* No displacement is OK. */
2718 if (!disp)
2719 return true;
2721 /* Without the long displacement facility we don't need to
2722 distinguish between long and short displacements. */
2723 if (!TARGET_LONG_DISPLACEMENT)
2724 return true;
2726 /* Integer displacement in range. */
2727 if (GET_CODE (disp) == CONST_INT)
2728 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2730 /* GOT offset is not OK, the GOT can be large. */
2731 if (GET_CODE (disp) == CONST
2732 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2733 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2734 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2735 return false;
2737 /* All other symbolic constants are literal pool references,
2738 which are OK as the literal pool must be small. */
2739 if (GET_CODE (disp) == CONST)
2740 return true;
2742 return false;
2745 /* Decompose a RTL expression ADDR for a memory address into
2746 its components, returned in OUT.
2748 Returns false if ADDR is not a valid memory address, true
2749 otherwise. If OUT is NULL, don't return the components,
2750 but check for validity only.
2752 Note: Only addresses in canonical form are recognized.
2753 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2754 canonical form so that they will be recognized. */
2756 static int
2757 s390_decompose_address (rtx addr, struct s390_address *out)
2759 HOST_WIDE_INT offset = 0;
2760 rtx base = NULL_RTX;
2761 rtx indx = NULL_RTX;
2762 rtx disp = NULL_RTX;
2763 rtx orig_disp;
2764 bool pointer = false;
2765 bool base_ptr = false;
2766 bool indx_ptr = false;
2767 bool literal_pool = false;
2769 /* We may need to substitute the literal pool base register into the address
2770 below. However, at this point we do not know which register is going to
2771 be used as base, so we substitute the arg pointer register. This is going
2772 to be treated as holding a pointer below -- it shouldn't be used for any
2773 other purpose. */
2774 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2776 /* Decompose address into base + index + displacement. */
2778 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2779 base = addr;
2781 else if (GET_CODE (addr) == PLUS)
2783 rtx op0 = XEXP (addr, 0);
2784 rtx op1 = XEXP (addr, 1);
2785 enum rtx_code code0 = GET_CODE (op0);
2786 enum rtx_code code1 = GET_CODE (op1);
2788 if (code0 == REG || code0 == UNSPEC)
2790 if (code1 == REG || code1 == UNSPEC)
2792 indx = op0; /* index + base */
2793 base = op1;
2796 else
2798 base = op0; /* base + displacement */
2799 disp = op1;
2803 else if (code0 == PLUS)
2805 indx = XEXP (op0, 0); /* index + base + disp */
2806 base = XEXP (op0, 1);
2807 disp = op1;
2810 else
2812 return false;
2816 else
2817 disp = addr; /* displacement */
2819 /* Extract integer part of displacement. */
2820 orig_disp = disp;
2821 if (disp)
2823 if (GET_CODE (disp) == CONST_INT)
2825 offset = INTVAL (disp);
2826 disp = NULL_RTX;
2828 else if (GET_CODE (disp) == CONST
2829 && GET_CODE (XEXP (disp, 0)) == PLUS
2830 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2832 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2833 disp = XEXP (XEXP (disp, 0), 0);
2837 /* Strip off CONST here to avoid special case tests later. */
2838 if (disp && GET_CODE (disp) == CONST)
2839 disp = XEXP (disp, 0);
2841 /* We can convert literal pool addresses to
2842 displacements by basing them off the base register. */
2843 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2845 if (base || indx)
2846 return false;
2848 base = fake_pool_base, literal_pool = true;
2850 /* Mark up the displacement. */
2851 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2852 UNSPEC_LTREL_OFFSET);
2855 /* Validate base register. */
2856 if (base)
2858 if (GET_CODE (base) == UNSPEC)
2859 switch (XINT (base, 1))
2861 case UNSPEC_LTREF:
2862 if (!disp)
2863 disp = gen_rtx_UNSPEC (Pmode,
2864 gen_rtvec (1, XVECEXP (base, 0, 0)),
2865 UNSPEC_LTREL_OFFSET);
2866 else
2867 return false;
2869 base = XVECEXP (base, 0, 1);
2870 break;
2872 case UNSPEC_LTREL_BASE:
2873 if (XVECLEN (base, 0) == 1)
2874 base = fake_pool_base, literal_pool = true;
2875 else
2876 base = XVECEXP (base, 0, 1);
2877 break;
2879 default:
2880 return false;
2883 if (!REG_P (base) || GET_MODE (base) != Pmode)
2884 return false;
2886 if (REGNO (base) == STACK_POINTER_REGNUM
2887 || REGNO (base) == FRAME_POINTER_REGNUM
2888 || ((reload_completed || reload_in_progress)
2889 && frame_pointer_needed
2890 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2891 || REGNO (base) == ARG_POINTER_REGNUM
2892 || (flag_pic
2893 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2894 pointer = base_ptr = true;
2896 if ((reload_completed || reload_in_progress)
2897 && base == cfun->machine->base_reg)
2898 pointer = base_ptr = literal_pool = true;
2901 /* Validate index register. */
2902 if (indx)
2904 if (GET_CODE (indx) == UNSPEC)
2905 switch (XINT (indx, 1))
2907 case UNSPEC_LTREF:
2908 if (!disp)
2909 disp = gen_rtx_UNSPEC (Pmode,
2910 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2911 UNSPEC_LTREL_OFFSET);
2912 else
2913 return false;
2915 indx = XVECEXP (indx, 0, 1);
2916 break;
2918 case UNSPEC_LTREL_BASE:
2919 if (XVECLEN (indx, 0) == 1)
2920 indx = fake_pool_base, literal_pool = true;
2921 else
2922 indx = XVECEXP (indx, 0, 1);
2923 break;
2925 default:
2926 return false;
2929 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2930 return false;
2932 if (REGNO (indx) == STACK_POINTER_REGNUM
2933 || REGNO (indx) == FRAME_POINTER_REGNUM
2934 || ((reload_completed || reload_in_progress)
2935 && frame_pointer_needed
2936 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2937 || REGNO (indx) == ARG_POINTER_REGNUM
2938 || (flag_pic
2939 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2940 pointer = indx_ptr = true;
2942 if ((reload_completed || reload_in_progress)
2943 && indx == cfun->machine->base_reg)
2944 pointer = indx_ptr = literal_pool = true;
2947 /* Prefer to use pointer as base, not index. */
2948 if (base && indx && !base_ptr
2949 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2951 rtx tmp = base;
2952 base = indx;
2953 indx = tmp;
2956 /* Validate displacement. */
2957 if (!disp)
2959 /* If virtual registers are involved, the displacement will change later
2960 anyway as the virtual registers get eliminated. This could make a
2961 valid displacement invalid, but it is more likely to make an invalid
2962 displacement valid, because we sometimes access the register save area
2963 via negative offsets to one of those registers.
2964 Thus we don't check the displacement for validity here. If after
2965 elimination the displacement turns out to be invalid after all,
2966 this is fixed up by reload in any case. */
2967 /* LRA always keeps displacements up to date, and we need to know
2968 that the displacement is valid throughout LRA, not only at the
2969 final elimination. */
2970 if (lra_in_progress
2971 || (base != arg_pointer_rtx
2972 && indx != arg_pointer_rtx
2973 && base != return_address_pointer_rtx
2974 && indx != return_address_pointer_rtx
2975 && base != frame_pointer_rtx
2976 && indx != frame_pointer_rtx
2977 && base != virtual_stack_vars_rtx
2978 && indx != virtual_stack_vars_rtx))
2979 if (!DISP_IN_RANGE (offset))
2980 return false;
2982 else
2984 /* All the special cases are pointers. */
2985 pointer = true;
2987 /* In the small-PIC case, the linker converts @GOT
2988 and @GOTNTPOFF offsets to possible displacements. */
2989 if (GET_CODE (disp) == UNSPEC
2990 && (XINT (disp, 1) == UNSPEC_GOT
2991 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2992 && flag_pic == 1)
2997 /* Accept pool label offsets. */
2998 else if (GET_CODE (disp) == UNSPEC
2999 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
3002 /* Accept literal pool references. */
3003 else if (GET_CODE (disp) == UNSPEC
3004 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
3006 /* In case CSE pulled a non-literal-pool reference out of
3007 the pool we have to reject the address. This is
3008 especially important when loading the GOT pointer on
3009 non-zarch CPUs. In this case the literal pool contains an
3010 LT-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3011 will most likely exceed the displacement. */
3012 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3013 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3014 return false;
3016 orig_disp = gen_rtx_CONST (Pmode, disp);
3017 if (offset)
3019 /* If we have an offset, make sure it does not
3020 exceed the size of the constant pool entry. */
3021 rtx sym = XVECEXP (disp, 0, 0);
3022 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
3023 return false;
3025 orig_disp = plus_constant (Pmode, orig_disp, offset);
3029 else
3030 return false;
3033 if (!base && !indx)
3034 pointer = true;
3036 if (out)
3038 out->base = base;
3039 out->indx = indx;
3040 out->disp = orig_disp;
3041 out->pointer = pointer;
3042 out->literal_pool = literal_pool;
3045 return true;
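/* For illustration: the canonical address
     (plus (plus (reg %r4) (reg %r3)) (const_int 16))
   decomposes into indx = %r4, base = %r3 and disp = 16.  An address
   combining a register with a SYMBOL_REF displacement is rejected;
   only a literal pool SYMBOL_REF without base and index is rewritten
   above into a base + UNSPEC_LTREL_OFFSET form.  */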
3048 /* Decompose a RTL expression OP for an address style operand into its
3049 components, and return the base register in BASE and the offset in
3050 OFFSET. While OP looks like an address it is never supposed to be
3051 used as such.
3053 Return true if OP is a valid address operand, false if not. */
3055 bool
3056 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3057 HOST_WIDE_INT *offset)
3059 rtx off = NULL_RTX;
3061 /* We can have an integer constant, an address register,
3062 or a sum of the two. */
3063 if (CONST_SCALAR_INT_P (op))
3065 off = op;
3066 op = NULL_RTX;
3068 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3070 off = XEXP (op, 1);
3071 op = XEXP (op, 0);
3073 while (op && GET_CODE (op) == SUBREG)
3074 op = SUBREG_REG (op);
3076 if (op && GET_CODE (op) != REG)
3077 return false;
3079 if (offset)
3081 if (off == NULL_RTX)
3082 *offset = 0;
3083 else if (CONST_INT_P (off))
3084 *offset = INTVAL (off);
3085 else if (CONST_WIDE_INT_P (off))
3086 /* The offset will be cut down to 12 bits anyway, so just take
3087 the lowest-order chunk of the wide int. */
3088 *offset = CONST_WIDE_INT_ELT (off, 0);
3089 else
3090 gcc_unreachable ();
3092 if (base)
3093 *base = op;
3095 return true;
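/* For illustration: a shift count operand such as
   (plus (reg %r2) (const_int 7)) yields *BASE = %r2 and *OFFSET = 7,
   while a plain (const_int 7) yields *BASE = NULL_RTX and
   *OFFSET = 7.  */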
3099 /* Return true if OP is a valid address without an index register. */
3101 bool
3102 s390_legitimate_address_without_index_p (rtx op)
3104 struct s390_address addr;
3106 if (!s390_decompose_address (XEXP (op, 0), &addr))
3107 return false;
3108 if (addr.indx)
3109 return false;
3111 return true;
3115 /* Return TRUE if ADDR is an operand valid for a load/store relative
3116 instruction. Be aware that the alignment of the operand needs to
3117 be checked separately.
3118 Valid addresses are single references or a sum of a reference and a
3119 constant integer. Return these parts in SYMREF and ADDEND. You can
3120 pass NULL in REF and/or ADDEND if you are not interested in these
3121 values. Literal pool references are *not* considered symbol
3122 references. */
3124 static bool
3125 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3127 HOST_WIDE_INT tmpaddend = 0;
3129 if (GET_CODE (addr) == CONST)
3130 addr = XEXP (addr, 0);
3132 if (GET_CODE (addr) == PLUS)
3134 if (!CONST_INT_P (XEXP (addr, 1)))
3135 return false;
3137 tmpaddend = INTVAL (XEXP (addr, 1));
3138 addr = XEXP (addr, 0);
3141 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3142 || (GET_CODE (addr) == UNSPEC
3143 && (XINT (addr, 1) == UNSPEC_GOTENT
3144 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3146 if (symref)
3147 *symref = addr;
3148 if (addend)
3149 *addend = tmpaddend;
3151 return true;
3153 return false;
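/* For illustration: for ADDR = (const (plus (symbol_ref "x")
   (const_int 12))) this returns true with *SYMREF = (symbol_ref "x")
   and *ADDEND = 12, provided "x" is not a literal pool entry; a GOT
   entry reference wrapped in UNSPEC_GOTENT is accepted as well.  */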
3156 /* Return true if the address in OP is valid for constraint letter C
3157 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3158 pool MEMs should be accepted. Only the Q, R, S, T constraint
3159 letters are allowed for C. */
3161 static int
3162 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3164 struct s390_address addr;
3165 bool decomposed = false;
3167 /* This check makes sure that no symbolic address (except literal
3168 pool references) are accepted by the R or T constraints. */
3169 if (s390_loadrelative_operand_p (op, NULL, NULL))
3170 return 0;
3172 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3173 if (!lit_pool_ok)
3175 if (!s390_decompose_address (op, &addr))
3176 return 0;
3177 if (addr.literal_pool)
3178 return 0;
3179 decomposed = true;
3182 /* With reload, we sometimes get intermediate address forms that are
3183 actually invalid as-is, but we need to accept them in the most
3184 generic cases below ('R' or 'T'), since reload will in fact fix
3185 them up. LRA behaves differently here; we never see such forms,
3186 but on the other hand, we need to strictly reject every invalid
3187 address form. Perform this check right up front. */
3188 if (lra_in_progress)
3190 if (!decomposed && !s390_decompose_address (op, &addr))
3191 return 0;
3192 decomposed = true;
3195 switch (c)
3197 case 'Q': /* no index short displacement */
3198 if (!decomposed && !s390_decompose_address (op, &addr))
3199 return 0;
3200 if (addr.indx)
3201 return 0;
3202 if (!s390_short_displacement (addr.disp))
3203 return 0;
3204 break;
3206 case 'R': /* with index short displacement */
3207 if (TARGET_LONG_DISPLACEMENT)
3209 if (!decomposed && !s390_decompose_address (op, &addr))
3210 return 0;
3211 if (!s390_short_displacement (addr.disp))
3212 return 0;
3214 /* Any invalid address here will be fixed up by reload,
3215 so accept it for the most generic constraint. */
3216 break;
3218 case 'S': /* no index long displacement */
3219 if (!decomposed && !s390_decompose_address (op, &addr))
3220 return 0;
3221 if (addr.indx)
3222 return 0;
3223 break;
3225 case 'T': /* with index long displacement */
3226 /* Any invalid address here will be fixed up by reload,
3227 so accept it for the most generic constraint. */
3228 break;
3230 default:
3231 return 0;
3233 return 1;
3237 /* Evaluates constraint strings described by the regular expression
3238 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3239 the constraint given in STR, and 0 otherwise. */
3242 s390_mem_constraint (const char *str, rtx op)
3244 char c = str[0];
3246 switch (c)
3248 case 'A':
3249 /* Check for offsettable variants of memory constraints. */
3250 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3251 return 0;
3252 if ((reload_completed || reload_in_progress)
3253 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3254 return 0;
3255 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3256 case 'B':
3257 /* Check for non-literal-pool variants of memory constraints. */
3258 if (!MEM_P (op))
3259 return 0;
3260 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3261 case 'Q':
3262 case 'R':
3263 case 'S':
3264 case 'T':
3265 if (GET_CODE (op) != MEM)
3266 return 0;
3267 return s390_check_qrst_address (c, XEXP (op, 0), true);
3268 case 'Y':
3269 /* Simply check for the basic form of a shift count. Reload will
3270 take care of making sure we have a proper base register. */
3271 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3272 return 0;
3273 break;
3274 case 'Z':
3275 return s390_check_qrst_address (str[1], op, true);
3276 default:
3277 return 0;
3279 return 1;
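/* For illustration: the constraint "AQ" accepts an offsettable MEM
   whose address has no index register and a short displacement, "BR"
   additionally excludes literal pool MEMs, and "ZT" applies the "T"
   check to an address operand that is not wrapped in a MEM.  */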
3283 /* Evaluates constraint strings starting with letter O. Input
3284 parameter C is the letter following the "O" in the constraint
3285 string. Returns 1 if VALUE meets the respective constraint and 0
3286 otherwise. */
3289 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3291 if (!TARGET_EXTIMM)
3292 return 0;
3294 switch (c)
3296 case 's':
3297 return trunc_int_for_mode (value, SImode) == value;
3299 case 'p':
3300 return value == 0
3301 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3303 case 'n':
3304 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3306 default:
3307 gcc_unreachable ();
3312 /* Evaluates constraint strings starting with letter N. Parameter STR
3313 contains the letters following letter "N" in the constraint string.
3314 Returns true if VALUE matches the constraint. */
3317 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3319 machine_mode mode, part_mode;
3320 int def;
3321 int part, part_goal;
3324 if (str[0] == 'x')
3325 part_goal = -1;
3326 else
3327 part_goal = str[0] - '0';
3329 switch (str[1])
3331 case 'Q':
3332 part_mode = QImode;
3333 break;
3334 case 'H':
3335 part_mode = HImode;
3336 break;
3337 case 'S':
3338 part_mode = SImode;
3339 break;
3340 default:
3341 return 0;
3344 switch (str[2])
3346 case 'H':
3347 mode = HImode;
3348 break;
3349 case 'S':
3350 mode = SImode;
3351 break;
3352 case 'D':
3353 mode = DImode;
3354 break;
3355 default:
3356 return 0;
3359 switch (str[3])
3361 case '0':
3362 def = 0;
3363 break;
3364 case 'F':
3365 def = -1;
3366 break;
3367 default:
3368 return 0;
3371 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3372 return 0;
3374 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3375 if (part < 0)
3376 return 0;
3377 if (part_goal != -1 && part_goal != part)
3378 return 0;
3380 return 1;
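/* For illustration: a constraint string such as "N0HD0" accepts a
   DImode value whose most significant 16-bit part is the only part
   different from zero (part goal 0, HImode parts, DImode value,
   remaining parts 0), while "NxQS0" would accept any SImode value
   with exactly one non-zero byte.  */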
3384 /* Returns true if the input parameter VALUE is a float zero. */
3387 s390_float_const_zero_p (rtx value)
3389 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3390 && value == CONST0_RTX (GET_MODE (value)));
3393 /* Implement TARGET_REGISTER_MOVE_COST. */
3395 static int
3396 s390_register_move_cost (machine_mode mode,
3397 reg_class_t from, reg_class_t to)
3399 /* On s390, copy between fprs and gprs is expensive. */
3401 /* It becomes somewhat faster having ldgr/lgdr. */
3402 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3404 /* ldgr is single cycle. */
3405 if (reg_classes_intersect_p (from, GENERAL_REGS)
3406 && reg_classes_intersect_p (to, FP_REGS))
3407 return 1;
3408 /* lgdr needs 3 cycles. */
3409 if (reg_classes_intersect_p (to, GENERAL_REGS)
3410 && reg_classes_intersect_p (from, FP_REGS))
3411 return 3;
3414 /* Otherwise copying is done via memory. */
3415 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3416 && reg_classes_intersect_p (to, FP_REGS))
3417 || (reg_classes_intersect_p (from, FP_REGS)
3418 && reg_classes_intersect_p (to, GENERAL_REGS)))
3419 return 10;
3421 return 1;
3424 /* Implement TARGET_MEMORY_MOVE_COST. */
3426 static int
3427 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3428 reg_class_t rclass ATTRIBUTE_UNUSED,
3429 bool in ATTRIBUTE_UNUSED)
3431 return 2;
3434 /* Compute a (partial) cost for rtx X. Return true if the complete
3435 cost has been computed, and false if subexpressions should be
3436 scanned. In either case, *TOTAL contains the cost result. The
3437 initial value of *TOTAL is the default value computed by
3438 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3439 code of the superexpression of x. */
3441 static bool
3442 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3443 int opno ATTRIBUTE_UNUSED,
3444 int *total, bool speed ATTRIBUTE_UNUSED)
3446 int code = GET_CODE (x);
3447 switch (code)
3449 case CONST:
3450 case CONST_INT:
3451 case LABEL_REF:
3452 case SYMBOL_REF:
3453 case CONST_DOUBLE:
3454 case CONST_WIDE_INT:
3455 case MEM:
3456 *total = 0;
3457 return true;
3459 case IOR:
3460 /* risbg */
3461 if (GET_CODE (XEXP (x, 0)) == AND
3462 && GET_CODE (XEXP (x, 1)) == ASHIFT
3463 && REG_P (XEXP (XEXP (x, 0), 0))
3464 && REG_P (XEXP (XEXP (x, 1), 0))
3465 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3466 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3467 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3468 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3470 *total = COSTS_N_INSNS (2);
3471 return true;
3473 /* fallthrough */
3474 case ASHIFT:
3475 case ASHIFTRT:
3476 case LSHIFTRT:
3477 case ROTATE:
3478 case ROTATERT:
3479 case AND:
3480 case XOR:
3481 case NEG:
3482 case NOT:
3483 *total = COSTS_N_INSNS (1);
3484 return false;
3486 case PLUS:
3487 case MINUS:
3488 *total = COSTS_N_INSNS (1);
3489 return false;
3491 case MULT:
3492 switch (mode)
3494 case SImode:
3496 rtx left = XEXP (x, 0);
3497 rtx right = XEXP (x, 1);
3498 if (GET_CODE (right) == CONST_INT
3499 && CONST_OK_FOR_K (INTVAL (right)))
3500 *total = s390_cost->mhi;
3501 else if (GET_CODE (left) == SIGN_EXTEND)
3502 *total = s390_cost->mh;
3503 else
3504 *total = s390_cost->ms; /* msr, ms, msy */
3505 break;
3507 case DImode:
3509 rtx left = XEXP (x, 0);
3510 rtx right = XEXP (x, 1);
3511 if (TARGET_ZARCH)
3513 if (GET_CODE (right) == CONST_INT
3514 && CONST_OK_FOR_K (INTVAL (right)))
3515 *total = s390_cost->mghi;
3516 else if (GET_CODE (left) == SIGN_EXTEND)
3517 *total = s390_cost->msgf;
3518 else
3519 *total = s390_cost->msg; /* msgr, msg */
3521 else /* TARGET_31BIT */
3523 if (GET_CODE (left) == SIGN_EXTEND
3524 && GET_CODE (right) == SIGN_EXTEND)
3525 /* mulsidi case: mr, m */
3526 *total = s390_cost->m;
3527 else if (GET_CODE (left) == ZERO_EXTEND
3528 && GET_CODE (right) == ZERO_EXTEND
3529 && TARGET_CPU_ZARCH)
3530 /* umulsidi case: ml, mlr */
3531 *total = s390_cost->ml;
3532 else
3533 /* Complex calculation is required. */
3534 *total = COSTS_N_INSNS (40);
3536 break;
3538 case SFmode:
3539 case DFmode:
3540 *total = s390_cost->mult_df;
3541 break;
3542 case TFmode:
3543 *total = s390_cost->mxbr;
3544 break;
3545 default:
3546 return false;
3548 return false;
3550 case FMA:
3551 switch (mode)
3553 case DFmode:
3554 *total = s390_cost->madbr;
3555 break;
3556 case SFmode:
3557 *total = s390_cost->maebr;
3558 break;
3559 default:
3560 return false;
3562 /* Negate in the third argument is free: FMSUB. */
3563 if (GET_CODE (XEXP (x, 2)) == NEG)
3565 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3566 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3567 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3568 return true;
3570 return false;
3572 case UDIV:
3573 case UMOD:
3574 if (mode == TImode) /* 128 bit division */
3575 *total = s390_cost->dlgr;
3576 else if (mode == DImode)
3578 rtx right = XEXP (x, 1);
3579 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3580 *total = s390_cost->dlr;
3581 else /* 64 by 64 bit division */
3582 *total = s390_cost->dlgr;
3584 else if (mode == SImode) /* 32 bit division */
3585 *total = s390_cost->dlr;
3586 return false;
3588 case DIV:
3589 case MOD:
3590 if (mode == DImode)
3592 rtx right = XEXP (x, 1);
3593 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3594 if (TARGET_ZARCH)
3595 *total = s390_cost->dsgfr;
3596 else
3597 *total = s390_cost->dr;
3598 else /* 64 by 64 bit division */
3599 *total = s390_cost->dsgr;
3601 else if (mode == SImode) /* 32 bit division */
3602 *total = s390_cost->dlr;
3603 else if (mode == SFmode)
3605 *total = s390_cost->debr;
3607 else if (mode == DFmode)
3609 *total = s390_cost->ddbr;
3611 else if (mode == TFmode)
3613 *total = s390_cost->dxbr;
3615 return false;
3617 case SQRT:
3618 if (mode == SFmode)
3619 *total = s390_cost->sqebr;
3620 else if (mode == DFmode)
3621 *total = s390_cost->sqdbr;
3622 else /* TFmode */
3623 *total = s390_cost->sqxbr;
3624 return false;
3626 case SIGN_EXTEND:
3627 case ZERO_EXTEND:
3628 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3629 || outer_code == PLUS || outer_code == MINUS
3630 || outer_code == COMPARE)
3631 *total = 0;
3632 return false;
3634 case COMPARE:
3635 *total = COSTS_N_INSNS (1);
3636 if (GET_CODE (XEXP (x, 0)) == AND
3637 && GET_CODE (XEXP (x, 1)) == CONST_INT
3638 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3640 rtx op0 = XEXP (XEXP (x, 0), 0);
3641 rtx op1 = XEXP (XEXP (x, 0), 1);
3642 rtx op2 = XEXP (x, 1);
3644 if (memory_operand (op0, GET_MODE (op0))
3645 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3646 return true;
3647 if (register_operand (op0, GET_MODE (op0))
3648 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3649 return true;
3651 return false;
3653 default:
3654 return false;
3658 /* Return the cost of an address rtx ADDR. */
3660 static int
3661 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3662 addr_space_t as ATTRIBUTE_UNUSED,
3663 bool speed ATTRIBUTE_UNUSED)
3665 struct s390_address ad;
3666 if (!s390_decompose_address (addr, &ad))
3667 return 1000;
3669 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3672 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3673 static int
3674 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3675 tree vectype,
3676 int misalign ATTRIBUTE_UNUSED)
3678 switch (type_of_cost)
3680 case scalar_stmt:
3681 case scalar_load:
3682 case scalar_store:
3683 case vector_stmt:
3684 case vector_load:
3685 case vector_store:
3686 case vec_to_scalar:
3687 case scalar_to_vec:
3688 case cond_branch_not_taken:
3689 case vec_perm:
3690 case vec_promote_demote:
3691 case unaligned_load:
3692 case unaligned_store:
3693 return 1;
3695 case cond_branch_taken:
3696 return 3;
3698 case vec_construct:
3699 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3701 default:
3702 gcc_unreachable ();
3706 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3707 otherwise return 0. */
3710 tls_symbolic_operand (rtx op)
3712 if (GET_CODE (op) != SYMBOL_REF)
3713 return 0;
3714 return SYMBOL_REF_TLS_MODEL (op);
3717 /* Split DImode access register reference REG (on 64-bit) into its constituent
3718 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3719 gen_highpart cannot be used as they assume all registers are word-sized,
3720 while our access registers have only half that size. */
3722 void
3723 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3725 gcc_assert (TARGET_64BIT);
3726 gcc_assert (ACCESS_REG_P (reg));
3727 gcc_assert (GET_MODE (reg) == DImode);
3728 gcc_assert (!(REGNO (reg) & 1));
3730 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3731 *hi = gen_rtx_REG (SImode, REGNO (reg));
3734 /* Return true if OP contains a symbol reference. */
3736 bool
3737 symbolic_reference_mentioned_p (rtx op)
3739 const char *fmt;
3740 int i;
3742 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3743 return 1;
3745 fmt = GET_RTX_FORMAT (GET_CODE (op));
3746 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3748 if (fmt[i] == 'E')
3750 int j;
3752 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3753 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3754 return 1;
3757 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3758 return 1;
3761 return 0;
3764 /* Return true if OP contains a reference to a thread-local symbol. */
3766 bool
3767 tls_symbolic_reference_mentioned_p (rtx op)
3769 const char *fmt;
3770 int i;
3772 if (GET_CODE (op) == SYMBOL_REF)
3773 return tls_symbolic_operand (op);
3775 fmt = GET_RTX_FORMAT (GET_CODE (op));
3776 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3778 if (fmt[i] == 'E')
3780 int j;
3782 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3783 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3784 return true;
3787 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3788 return true;
3791 return false;
3795 /* Return true if OP is a legitimate general operand when
3796 generating PIC code. It is given that flag_pic is on
3797 and that OP satisfies CONSTANT_P. */
3800 legitimate_pic_operand_p (rtx op)
3802 /* Accept all non-symbolic constants. */
3803 if (!SYMBOLIC_CONST (op))
3804 return 1;
3806 /* Reject everything else; must be handled
3807 via emit_symbolic_move. */
3808 return 0;
3811 /* Returns true if the constant value OP is a legitimate general operand.
3812 It is given that OP satisfies CONSTANT_P. */
3814 static bool
3815 s390_legitimate_constant_p (machine_mode mode, rtx op)
3817 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3819 if (GET_MODE_SIZE (mode) != 16)
3820 return 0;
3822 if (!satisfies_constraint_j00 (op)
3823 && !satisfies_constraint_jm1 (op)
3824 && !satisfies_constraint_jKK (op)
3825 && !satisfies_constraint_jxx (op)
3826 && !satisfies_constraint_jyy (op))
3827 return 0;
3830 /* Accept all non-symbolic constants. */
3831 if (!SYMBOLIC_CONST (op))
3832 return 1;
3834 /* Accept immediate LARL operands. */
3835 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3836 return 1;
3838 /* Thread-local symbols are never legal constants. This is
3839 so that emit_call knows that computing such addresses
3840 might require a function call. */
3841 if (TLS_SYMBOLIC_CONST (op))
3842 return 0;
3844 /* In the PIC case, symbolic constants must *not* be
3845 forced into the literal pool. We accept them here,
3846 so that they will be handled by emit_symbolic_move. */
3847 if (flag_pic)
3848 return 1;
3850 /* All remaining non-PIC symbolic constants are
3851 forced into the literal pool. */
3852 return 0;
3855 /* Determine if it's legal to put X into the constant pool. This
3856 is not possible if X contains the address of a symbol that is
3857 not constant (TLS) or not known at final link time (PIC). */
3859 static bool
3860 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3862 switch (GET_CODE (x))
3864 case CONST_INT:
3865 case CONST_DOUBLE:
3866 case CONST_WIDE_INT:
3867 case CONST_VECTOR:
3868 /* Accept all non-symbolic constants. */
3869 return false;
3871 case LABEL_REF:
3872 /* Labels are OK iff we are non-PIC. */
3873 return flag_pic != 0;
3875 case SYMBOL_REF:
3876 /* 'Naked' TLS symbol references are never OK,
3877 non-TLS symbols are OK iff we are non-PIC. */
3878 if (tls_symbolic_operand (x))
3879 return true;
3880 else
3881 return flag_pic != 0;
3883 case CONST:
3884 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3885 case PLUS:
3886 case MINUS:
3887 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3888 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3890 case UNSPEC:
3891 switch (XINT (x, 1))
3893 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3894 case UNSPEC_LTREL_OFFSET:
3895 case UNSPEC_GOT:
3896 case UNSPEC_GOTOFF:
3897 case UNSPEC_PLTOFF:
3898 case UNSPEC_TLSGD:
3899 case UNSPEC_TLSLDM:
3900 case UNSPEC_NTPOFF:
3901 case UNSPEC_DTPOFF:
3902 case UNSPEC_GOTNTPOFF:
3903 case UNSPEC_INDNTPOFF:
3904 return false;
3906 /* If the literal pool shares the code section, execute template
3907 placeholders may be put into the pool as well. */
3908 case UNSPEC_INSN:
3909 return TARGET_CPU_ZARCH;
3911 default:
3912 return true;
3914 break;
3916 default:
3917 gcc_unreachable ();
3921 /* Returns true if the constant value OP is a legitimate general
3922 operand during and after reload. The difference to
3923 legitimate_constant_p is that this function will not accept
3924 a constant that would need to be forced to the literal pool
3925 before it can be used as operand.
3926 This function accepts all constants which can be loaded directly
3927 into a GPR. */
3929 bool
3930 legitimate_reload_constant_p (rtx op)
3932 /* Accept la(y) operands. */
3933 if (GET_CODE (op) == CONST_INT
3934 && DISP_IN_RANGE (INTVAL (op)))
3935 return true;
3937 /* Accept l(g)hi/l(g)fi operands. */
3938 if (GET_CODE (op) == CONST_INT
3939 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3940 return true;
3942 /* Accept lliXX operands. */
3943 if (TARGET_ZARCH
3944 && GET_CODE (op) == CONST_INT
3945 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3946 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3947 return true;
3949 if (TARGET_EXTIMM
3950 && GET_CODE (op) == CONST_INT
3951 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3952 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3953 return true;
3955 /* Accept larl operands. */
3956 if (TARGET_CPU_ZARCH
3957 && larl_operand (op, VOIDmode))
3958 return true;
3960 /* Accept floating-point zero operands that fit into a single GPR. */
3961 if (GET_CODE (op) == CONST_DOUBLE
3962 && s390_float_const_zero_p (op)
3963 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3964 return true;
3966 /* Accept double-word operands that can be split. */
3967 if (GET_CODE (op) == CONST_WIDE_INT
3968 || (GET_CODE (op) == CONST_INT
3969 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3971 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3972 rtx hi = operand_subword (op, 0, 0, dword_mode);
3973 rtx lo = operand_subword (op, 1, 0, dword_mode);
3974 return legitimate_reload_constant_p (hi)
3975 && legitimate_reload_constant_p (lo);
3978 /* Everything else cannot be handled without reload. */
3979 return false;
3982 /* Returns true if the constant value OP is a legitimate fp operand
3983 during and after reload.
3984 This function accepts all constants which can be loaded directly
3985 into an FPR. */
3987 static bool
3988 legitimate_reload_fp_constant_p (rtx op)
3990 /* Accept floating-point zero operands if the load zero instruction
3991 can be used. Prior to z196 the load fp zero instruction caused a
3992 performance penalty if the result is used as BFP number. */
3993 if (TARGET_Z196
3994 && GET_CODE (op) == CONST_DOUBLE
3995 && s390_float_const_zero_p (op))
3996 return true;
3998 return false;
4001 /* Returns true if the constant value OP is a legitimate vector operand
4002 during and after reload.
4003 This function accepts all constants which can be loaded directly
4004 into an VR. */
4006 static bool
4007 legitimate_reload_vector_constant_p (rtx op)
4009 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
4010 && (satisfies_constraint_j00 (op)
4011 || satisfies_constraint_jm1 (op)
4012 || satisfies_constraint_jKK (op)
4013 || satisfies_constraint_jxx (op)
4014 || satisfies_constraint_jyy (op)))
4015 return true;
4017 return false;
4020 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
4021 return the class of reg to actually use. */
4023 static reg_class_t
4024 s390_preferred_reload_class (rtx op, reg_class_t rclass)
4026 switch (GET_CODE (op))
4028 /* Constants we cannot reload into general registers
4029 must be forced into the literal pool. */
4030 case CONST_VECTOR:
4031 case CONST_DOUBLE:
4032 case CONST_INT:
4033 case CONST_WIDE_INT:
4034 if (reg_class_subset_p (GENERAL_REGS, rclass)
4035 && legitimate_reload_constant_p (op))
4036 return GENERAL_REGS;
4037 else if (reg_class_subset_p (ADDR_REGS, rclass)
4038 && legitimate_reload_constant_p (op))
4039 return ADDR_REGS;
4040 else if (reg_class_subset_p (FP_REGS, rclass)
4041 && legitimate_reload_fp_constant_p (op))
4042 return FP_REGS;
4043 else if (reg_class_subset_p (VEC_REGS, rclass)
4044 && legitimate_reload_vector_constant_p (op))
4045 return VEC_REGS;
4047 return NO_REGS;
4049 /* If a symbolic constant or a PLUS is reloaded,
4050 it is most likely being used as an address, so
4051 prefer ADDR_REGS. If 'class' is not a superset
4052 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4053 case CONST:
4054 /* Symrefs cannot be pushed into the literal pool with -fPIC
4055 so we *MUST NOT* return NO_REGS for these cases
4056 (s390_cannot_force_const_mem will return true).
4058 On the other hand we MUST return NO_REGS for symrefs with
4059 invalid addend which might have been pushed to the literal
4060 pool (no -fPIC). Usually we would expect them to be
4061 handled via secondary reload but this does not happen if
4062 they are used as literal pool slot replacement in reload
4063 inheritance (see emit_input_reload_insns). */
4064 if (TARGET_CPU_ZARCH
4065 && GET_CODE (XEXP (op, 0)) == PLUS
4066 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4067 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4069 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4070 return ADDR_REGS;
4071 else
4072 return NO_REGS;
4074 /* fallthrough */
4075 case LABEL_REF:
4076 case SYMBOL_REF:
4077 if (!legitimate_reload_constant_p (op))
4078 return NO_REGS;
4079 /* fallthrough */
4080 case PLUS:
4081 /* load address will be used. */
4082 if (reg_class_subset_p (ADDR_REGS, rclass))
4083 return ADDR_REGS;
4084 else
4085 return NO_REGS;
4087 default:
4088 break;
4091 return rclass;
4094 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4095 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4096 aligned. */
4098 bool
4099 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4101 HOST_WIDE_INT addend;
4102 rtx symref;
4104 /* The "required alignment" might be 0 (e.g. for certain structs
4105 accessed via BLKmode). Early abort in this case, as well as when
4106 an alignment > 8 is required. */
4107 if (alignment < 2 || alignment > 8)
4108 return false;
4110 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4111 return false;
4113 if (addend & (alignment - 1))
4114 return false;
4116 if (GET_CODE (symref) == SYMBOL_REF)
4118 /* We have load-relative instructions for 2-byte, 4-byte, and
4119 8-byte alignment so allow only these. */
4120 switch (alignment)
4122 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4123 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4124 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4125 default: return false;
4129 if (GET_CODE (symref) == UNSPEC
4130 && alignment <= UNITS_PER_LONG)
4131 return true;
4133 return false;
4136 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4137 operand, SCRATCH is used to load the even part of the address, and
4138 one is then added via la. */
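/* For illustration (register numbers arbitrary), an odd addend is
handled roughly as
  larl  %r1, sym+(addend-1)
  la    %r2, 1(%r1)
which keeps the larl operand even and leaves the condition code
untouched.  */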
4140 void
4141 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4143 HOST_WIDE_INT addend;
4144 rtx symref;
4146 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4147 gcc_unreachable ();
4149 if (!(addend & 1))
4150 /* Easy case. The addend is even so larl will do fine. */
4151 emit_move_insn (reg, addr);
4152 else
4154 /* We can leave the scratch register untouched if the target
4155 register is a valid base register. */
4156 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4157 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4158 scratch = reg;
4160 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4161 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4163 if (addend != 1)
4164 emit_move_insn (scratch,
4165 gen_rtx_CONST (Pmode,
4166 gen_rtx_PLUS (Pmode, symref,
4167 GEN_INT (addend - 1))));
4168 else
4169 emit_move_insn (scratch, symref);
4171 /* Increment the address using la in order to avoid clobbering cc. */
4172 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4176 /* Generate what is necessary to move between REG and MEM using
4177 SCRATCH. The direction is given by TOMEM. */
4179 void
4180 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4182 /* Reload might have pulled a constant out of the literal pool.
4183 Force it back in. */
4184 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4185 || GET_CODE (mem) == CONST_WIDE_INT
4186 || GET_CODE (mem) == CONST_VECTOR
4187 || GET_CODE (mem) == CONST)
4188 mem = force_const_mem (GET_MODE (reg), mem);
4190 gcc_assert (MEM_P (mem));
4192 /* For a load from memory we can leave the scratch register
4193 untouched if the target register is a valid base register. */
4194 if (!tomem
4195 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4196 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4197 && GET_MODE (reg) == GET_MODE (scratch))
4198 scratch = reg;
4200 /* Load address into scratch register. Since we can't have a
4201 secondary reload for a secondary reload we have to cover the case
4202 where larl would need a secondary reload here as well. */
4203 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4205 /* Now we can use a standard load/store to do the move. */
4206 if (tomem)
4207 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4208 else
4209 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4212 /* Inform reload about cases where moving X with a mode MODE to a register in
4213 RCLASS requires an extra scratch or immediate register. Return the class
4214 needed for the immediate register. */
4216 static reg_class_t
4217 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4218 machine_mode mode, secondary_reload_info *sri)
4220 enum reg_class rclass = (enum reg_class) rclass_i;
4222 /* Intermediate register needed. */
4223 if (reg_classes_intersect_p (CC_REGS, rclass))
4224 return GENERAL_REGS;
4226 if (TARGET_VX)
4228 /* The vst/vl vector move instructions only allow short
4229 displacements. */
4230 if (MEM_P (x)
4231 && GET_CODE (XEXP (x, 0)) == PLUS
4232 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4233 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4234 && reg_class_subset_p (rclass, VEC_REGS)
4235 && (!reg_class_subset_p (rclass, FP_REGS)
4236 || (GET_MODE_SIZE (mode) > 8
4237 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4239 if (in_p)
4240 sri->icode = (TARGET_64BIT ?
4241 CODE_FOR_reloaddi_la_in :
4242 CODE_FOR_reloadsi_la_in);
4243 else
4244 sri->icode = (TARGET_64BIT ?
4245 CODE_FOR_reloaddi_la_out :
4246 CODE_FOR_reloadsi_la_out);
4250 if (TARGET_Z10)
4252 HOST_WIDE_INT offset;
4253 rtx symref;
4255 /* On z10 several optimizer steps may generate larl operands with
4256 an odd addend. */
4257 if (in_p
4258 && s390_loadrelative_operand_p (x, &symref, &offset)
4259 && mode == Pmode
4260 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4261 && (offset & 1) == 1)
4262 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4263 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4265 /* Handle all the (mem (symref)) accesses we cannot use the z10
4266 instructions for. */
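/* A sketch of when this triggers: there is no relative-long
instruction that operates on a single byte, so a QImode
(mem (symref)) access always needs its address materialized in a
scratch register first; likewise when the target class is not a
subset of GENERAL_REGS or the symbol is not known to be sufficiently
aligned.  */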
4267 if (MEM_P (x)
4268 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4269 && (mode == QImode
4270 || !reg_class_subset_p (rclass, GENERAL_REGS)
4271 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4272 || !s390_check_symref_alignment (XEXP (x, 0),
4273 GET_MODE_SIZE (mode))))
4275 #define __SECONDARY_RELOAD_CASE(M,m) \
4276 case M##mode: \
4277 if (TARGET_64BIT) \
4278 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4279 CODE_FOR_reload##m##di_tomem_z10; \
4280 else \
4281 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4282 CODE_FOR_reload##m##si_tomem_z10; \
4283 break;
4285 switch (GET_MODE (x))
4287 __SECONDARY_RELOAD_CASE (QI, qi);
4288 __SECONDARY_RELOAD_CASE (HI, hi);
4289 __SECONDARY_RELOAD_CASE (SI, si);
4290 __SECONDARY_RELOAD_CASE (DI, di);
4291 __SECONDARY_RELOAD_CASE (TI, ti);
4292 __SECONDARY_RELOAD_CASE (SF, sf);
4293 __SECONDARY_RELOAD_CASE (DF, df);
4294 __SECONDARY_RELOAD_CASE (TF, tf);
4295 __SECONDARY_RELOAD_CASE (SD, sd);
4296 __SECONDARY_RELOAD_CASE (DD, dd);
4297 __SECONDARY_RELOAD_CASE (TD, td);
4298 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4299 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4300 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4301 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4302 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4303 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4304 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4305 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4306 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4307 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4308 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4309 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4310 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4311 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4312 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4313 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4314 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4315 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4316 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4317 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4318 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4319 default:
4320 gcc_unreachable ();
4322 #undef __SECONDARY_RELOAD_CASE
4326 /* We need a scratch register when loading a PLUS expression which
4327 is not a legitimate operand of the LOAD ADDRESS instruction. */
4328 /* LRA can deal with the transformation of a PLUS operand very well -- so
4329 we don't need to prompt LRA in this case. */
4330 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4331 sri->icode = (TARGET_64BIT ?
4332 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4334 /* Performing a multiword move from or to memory we have to make sure the
4335 second chunk in memory is addressable without causing a displacement
4336 overflow. If that would be the case we calculate the address in
4337 a scratch register. */
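/* A worked example, assuming no long-displacement facility (valid
displacements are 0..4095): a 16-byte access at base+4088 places its
last byte at base+4103, so the check below fails and the address is
first computed into a scratch register.  */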
4338 if (MEM_P (x)
4339 && GET_CODE (XEXP (x, 0)) == PLUS
4340 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4341 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4342 + GET_MODE_SIZE (mode) - 1))
4344 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4345 in an s_operand address since we may fall back to lm/stm. So we only
4346 have to care about overflows in the b+i+d case. */
4347 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4348 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4349 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4350 /* For FP_REGS no lm/stm is available so this check is triggered
4351 for displacement overflows in b+i+d and b+d like addresses. */
4352 || (reg_classes_intersect_p (FP_REGS, rclass)
4353 && s390_class_max_nregs (FP_REGS, mode) > 1))
4355 if (in_p)
4356 sri->icode = (TARGET_64BIT ?
4357 CODE_FOR_reloaddi_la_in :
4358 CODE_FOR_reloadsi_la_in);
4359 else
4360 sri->icode = (TARGET_64BIT ?
4361 CODE_FOR_reloaddi_la_out :
4362 CODE_FOR_reloadsi_la_out);
4366 /* A scratch address register is needed when a symbolic constant is
4367 copied to r0 when compiling with -fPIC. In other cases the target
4368 register might be used as a temporary (see legitimize_pic_address). */
4369 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4370 sri->icode = (TARGET_64BIT ?
4371 CODE_FOR_reloaddi_PIC_addr :
4372 CODE_FOR_reloadsi_PIC_addr);
4374 /* Either scratch or no register needed. */
4375 return NO_REGS;
4378 /* Generate code to load SRC, which is PLUS that is not a
4379 legitimate operand for the LA instruction, into TARGET.
4380 SCRATCH may be used as scratch register. */
4382 void
4383 s390_expand_plus_operand (rtx target, rtx src,
4384 rtx scratch)
4386 rtx sum1, sum2;
4387 struct s390_address ad;
4389 /* src must be a PLUS; get its two operands. */
4390 gcc_assert (GET_CODE (src) == PLUS);
4391 gcc_assert (GET_MODE (src) == Pmode);
4393 /* Check if either of the two operands is already scheduled
4394 for replacement by reload. This can happen e.g. when
4395 float registers occur in an address. */
4396 sum1 = find_replacement (&XEXP (src, 0));
4397 sum2 = find_replacement (&XEXP (src, 1));
4398 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4400 /* If the address is already strictly valid, there's nothing to do. */
4401 if (!s390_decompose_address (src, &ad)
4402 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4403 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4405 /* Otherwise, one of the operands cannot be an address register;
4406 we reload its value into the scratch register. */
4407 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4409 emit_move_insn (scratch, sum1);
4410 sum1 = scratch;
4412 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4414 emit_move_insn (scratch, sum2);
4415 sum2 = scratch;
4418 /* According to the way these invalid addresses are generated
4419 in reload.c, it should never happen (at least on s390) that
4420 *neither* of the PLUS components, after find_replacements
4421 was applied, is an address register. */
4422 if (sum1 == scratch && sum2 == scratch)
4424 debug_rtx (src);
4425 gcc_unreachable ();
4428 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4431 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4432 is only ever performed on addresses, so we can mark the
4433 sum as legitimate for LA in any case. */
4434 s390_load_address (target, src);
4438 /* Return true if ADDR is a valid memory address.
4439 STRICT specifies whether strict register checking applies. */
4441 static bool
4442 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4444 struct s390_address ad;
4446 if (TARGET_Z10
4447 && larl_operand (addr, VOIDmode)
4448 && (mode == VOIDmode
4449 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4450 return true;
4452 if (!s390_decompose_address (addr, &ad))
4453 return false;
4455 if (strict)
4457 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4458 return false;
4460 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4461 return false;
4463 else
4465 if (ad.base
4466 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4467 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4468 return false;
4470 if (ad.indx
4471 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4472 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4473 return false;
4475 return true;
4478 /* Return true if OP is a valid operand for the LA instruction.
4479 In 31-bit, we need to prove that the result is used as an
4480 address, as LA performs only a 31-bit addition. */
4482 bool
4483 legitimate_la_operand_p (rtx op)
4485 struct s390_address addr;
4486 if (!s390_decompose_address (op, &addr))
4487 return false;
4489 return (TARGET_64BIT || addr.pointer);
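/* Background sketch: in 31-bit mode LA produces only a 31-bit result
(the leftmost bit of the result is cleared), so using it for plain
integer arithmetic could drop a significant bit; it is safe only when
the value really is used as an address, hence the addr.pointer
requirement above.  */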
4492 /* Return true if it is valid *and* preferable to use LA to
4493 compute the sum of OP1 and OP2. */
4495 bool
4496 preferred_la_operand_p (rtx op1, rtx op2)
4498 struct s390_address addr;
4500 if (op2 != const0_rtx)
4501 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4503 if (!s390_decompose_address (op1, &addr))
4504 return false;
4505 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4506 return false;
4507 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4508 return false;
4510 /* Avoid LA instructions with index register on z196; it is
4511 preferable to use regular add instructions when possible.
4512 Starting with zEC12 the la with index register is "uncracked"
4513 again. */
4514 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4515 return false;
4517 if (!TARGET_64BIT && !addr.pointer)
4518 return false;
4520 if (addr.pointer)
4521 return true;
4523 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4524 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4525 return true;
4527 return false;
4530 /* Emit a forced load-address operation to load SRC into DST.
4531 This will use the LOAD ADDRESS instruction even in situations
4532 where legitimate_la_operand_p (SRC) returns false. */
4534 void
4535 s390_load_address (rtx dst, rtx src)
4537 if (TARGET_64BIT)
4538 emit_move_insn (dst, src);
4539 else
4540 emit_insn (gen_force_la_31 (dst, src));
4543 /* Return a legitimate reference for ORIG (an address) using the
4544 register REG. If REG is 0, a new pseudo is generated.
4546 There are two types of references that must be handled:
4548 1. Global data references must load the address from the GOT, via
4549 the PIC reg. An insn is emitted to do this load, and the reg is
4550 returned.
4552 2. Static data references, constant pool addresses, and code labels
4553 compute the address as an offset from the GOT, whose base is in
4554 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4555 differentiate them from global data objects. The returned
4556 address is the PIC reg + an unspec constant.
4558 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4559 reg also appears in the address. */
4562 legitimize_pic_address (rtx orig, rtx reg)
4564 rtx addr = orig;
4565 rtx addend = const0_rtx;
4566 rtx new_rtx = orig;
4568 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4570 if (GET_CODE (addr) == CONST)
4571 addr = XEXP (addr, 0);
4573 if (GET_CODE (addr) == PLUS)
4575 addend = XEXP (addr, 1);
4576 addr = XEXP (addr, 0);
4579 if ((GET_CODE (addr) == LABEL_REF
4580 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4581 || (GET_CODE (addr) == UNSPEC &&
4582 (XINT (addr, 1) == UNSPEC_GOTENT
4583 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4584 && GET_CODE (addend) == CONST_INT)
4586 /* This can be locally addressed. */
4588 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4589 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4590 gen_rtx_CONST (Pmode, addr) : addr);
4592 if (TARGET_CPU_ZARCH
4593 && larl_operand (const_addr, VOIDmode)
4594 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4595 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4597 if (INTVAL (addend) & 1)
4599 /* LARL can't handle odd offsets, so emit a pair of LARL
4600 and LA. */
4601 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4603 if (!DISP_IN_RANGE (INTVAL (addend)))
4605 HOST_WIDE_INT even = INTVAL (addend) - 1;
4606 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4607 addr = gen_rtx_CONST (Pmode, addr);
4608 addend = const1_rtx;
4611 emit_move_insn (temp, addr);
4612 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4614 if (reg != 0)
4616 s390_load_address (reg, new_rtx);
4617 new_rtx = reg;
4620 else
4622 /* If the offset is even, we can just use LARL. This
4623 will happen automatically. */
4626 else
4628 /* No larl - Access local symbols relative to the GOT. */
4630 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4632 if (reload_in_progress || reload_completed)
4633 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4635 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4636 if (addend != const0_rtx)
4637 addr = gen_rtx_PLUS (Pmode, addr, addend);
4638 addr = gen_rtx_CONST (Pmode, addr);
4639 addr = force_const_mem (Pmode, addr);
4640 emit_move_insn (temp, addr);
4642 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4643 if (reg != 0)
4645 s390_load_address (reg, new_rtx);
4646 new_rtx = reg;
4650 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4652 /* A non-local symbol reference without addend.
4654 The symbol ref is wrapped into an UNSPEC to make sure the
4655 proper operand modifier (@GOT or @GOTENT) will be emitted.
4656 This will tell the linker to put the symbol into the GOT.
4658 Additionally the code dereferencing the GOT slot is emitted here.
4660 An addend to the symref needs to be added afterwards.
4661 legitimize_pic_address calls itself recursively to handle
4662 that case. So no need to do it here. */
4664 if (reg == 0)
4665 reg = gen_reg_rtx (Pmode);
4667 if (TARGET_Z10)
4669 /* Use load relative if possible.
4670 lgrl <target>, sym@GOTENT */
4671 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4672 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4673 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4675 emit_move_insn (reg, new_rtx);
4676 new_rtx = reg;
4678 else if (flag_pic == 1)
4680 /* Assume GOT offset is a valid displacement operand (< 4k
4681 or < 512k with z990). This is handled the same way in
4682 both 31- and 64-bit code (@GOT).
4683 lg <target>, sym@GOT(r12) */
4685 if (reload_in_progress || reload_completed)
4686 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4688 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4689 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4690 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4691 new_rtx = gen_const_mem (Pmode, new_rtx);
4692 emit_move_insn (reg, new_rtx);
4693 new_rtx = reg;
4695 else if (TARGET_CPU_ZARCH)
4697 /* If the GOT offset might be >= 4k, we determine the position
4698 of the GOT entry via a PC-relative LARL (@GOTENT).
4699 larl temp, sym@GOTENT
4700 lg <target>, 0(temp) */
4702 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4704 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4705 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4707 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4708 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4709 emit_move_insn (temp, new_rtx);
4711 new_rtx = gen_const_mem (Pmode, temp);
4712 emit_move_insn (reg, new_rtx);
4714 new_rtx = reg;
4716 else
4718 /* If the GOT offset might be >= 4k, we have to load it
4719 from the literal pool (@GOT).
4721 lg temp, lit-litbase(r13)
4722 lg <target>, 0(temp)
4723 lit: .long sym@GOT */
4725 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4727 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4728 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4730 if (reload_in_progress || reload_completed)
4731 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4733 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4734 addr = gen_rtx_CONST (Pmode, addr);
4735 addr = force_const_mem (Pmode, addr);
4736 emit_move_insn (temp, addr);
4738 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4739 new_rtx = gen_const_mem (Pmode, new_rtx);
4740 emit_move_insn (reg, new_rtx);
4741 new_rtx = reg;
4744 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4746 gcc_assert (XVECLEN (addr, 0) == 1);
4747 switch (XINT (addr, 1))
4749 /* These address symbols (or PLT slots) relative to the GOT
4750 (not GOT slots!). In general this will exceed the
4751 displacement range, so these values belong in the literal
4752 pool. */
4753 case UNSPEC_GOTOFF:
4754 case UNSPEC_PLTOFF:
4755 new_rtx = force_const_mem (Pmode, orig);
4756 break;
4758 /* For -fPIC the GOT size might exceed the displacement
4759 range so make sure the value is in the literal pool. */
4760 case UNSPEC_GOT:
4761 if (flag_pic == 2)
4762 new_rtx = force_const_mem (Pmode, orig);
4763 break;
4765 /* For @GOTENT larl is used. This is handled like local
4766 symbol refs. */
4767 case UNSPEC_GOTENT:
4768 gcc_unreachable ();
4769 break;
4771 /* @PLT is OK as is on 64-bit but must be converted to
4772 GOT-relative @PLTOFF on 31-bit. */
4773 case UNSPEC_PLT:
4774 if (!TARGET_CPU_ZARCH)
4776 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4778 if (reload_in_progress || reload_completed)
4779 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4781 addr = XVECEXP (addr, 0, 0);
4782 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4783 UNSPEC_PLTOFF);
4784 if (addend != const0_rtx)
4785 addr = gen_rtx_PLUS (Pmode, addr, addend);
4786 addr = gen_rtx_CONST (Pmode, addr);
4787 addr = force_const_mem (Pmode, addr);
4788 emit_move_insn (temp, addr);
4790 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4791 if (reg != 0)
4793 s390_load_address (reg, new_rtx);
4794 new_rtx = reg;
4797 else
4798 /* On 64 bit larl can be used. This case is handled like
4799 local symbol refs. */
4800 gcc_unreachable ();
4801 break;
4803 /* Everything else cannot happen. */
4804 default:
4805 gcc_unreachable ();
4808 else if (addend != const0_rtx)
4810 /* Otherwise, compute the sum. */
4812 rtx base = legitimize_pic_address (addr, reg);
4813 new_rtx = legitimize_pic_address (addend,
4814 base == reg ? NULL_RTX : reg);
4815 if (GET_CODE (new_rtx) == CONST_INT)
4816 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4817 else
4819 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4821 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4822 new_rtx = XEXP (new_rtx, 1);
4824 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4827 if (GET_CODE (new_rtx) == CONST)
4828 new_rtx = XEXP (new_rtx, 0);
4829 new_rtx = force_operand (new_rtx, 0);
4832 return new_rtx;
4835 /* Load the thread pointer into a register. */
4838 s390_get_thread_pointer (void)
4840 rtx tp = gen_reg_rtx (Pmode);
4842 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4843 mark_reg_pointer (tp, BITS_PER_WORD);
4845 return tp;
4848 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4849 in s390_tls_symbol which always refers to __tls_get_offset.
4850 The returned offset is written to RESULT_REG and a USE rtx is
4851 generated for TLS_CALL. */
4853 static GTY(()) rtx s390_tls_symbol;
4855 static void
4856 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4858 rtx insn;
4860 if (!flag_pic)
4861 emit_insn (s390_load_got ());
4863 if (!s390_tls_symbol)
4864 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4866 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4867 gen_rtx_REG (Pmode, RETURN_REGNUM));
4869 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4870 RTL_CONST_CALL_P (insn) = 1;
4873 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4874 this (thread-local) address. REG may be used as temporary. */
4876 static rtx
4877 legitimize_tls_address (rtx addr, rtx reg)
4879 rtx new_rtx, tls_call, temp, base, r2;
4880 rtx_insn *insn;
4882 if (GET_CODE (addr) == SYMBOL_REF)
4883 switch (tls_symbolic_operand (addr))
4885 case TLS_MODEL_GLOBAL_DYNAMIC:
4886 start_sequence ();
4887 r2 = gen_rtx_REG (Pmode, 2);
4888 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4889 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4890 new_rtx = force_const_mem (Pmode, new_rtx);
4891 emit_move_insn (r2, new_rtx);
4892 s390_emit_tls_call_insn (r2, tls_call);
4893 insn = get_insns ();
4894 end_sequence ();
4896 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4897 temp = gen_reg_rtx (Pmode);
4898 emit_libcall_block (insn, temp, r2, new_rtx);
4900 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4901 if (reg != 0)
4903 s390_load_address (reg, new_rtx);
4904 new_rtx = reg;
4906 break;
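/* Roughly, the global-dynamic sequence built above is: load the
literal sym@TLSGD into %r2, call __tls_get_offset, and add the
returned offset to the thread pointer; the exact literal and call
forms depend on the PIC and architecture settings handled
elsewhere.  */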
4908 case TLS_MODEL_LOCAL_DYNAMIC:
4909 start_sequence ();
4910 r2 = gen_rtx_REG (Pmode, 2);
4911 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4912 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4913 new_rtx = force_const_mem (Pmode, new_rtx);
4914 emit_move_insn (r2, new_rtx);
4915 s390_emit_tls_call_insn (r2, tls_call);
4916 insn = get_insns ();
4917 end_sequence ();
4919 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4920 temp = gen_reg_rtx (Pmode);
4921 emit_libcall_block (insn, temp, r2, new_rtx);
4923 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4924 base = gen_reg_rtx (Pmode);
4925 s390_load_address (base, new_rtx);
4927 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4928 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4929 new_rtx = force_const_mem (Pmode, new_rtx);
4930 temp = gen_reg_rtx (Pmode);
4931 emit_move_insn (temp, new_rtx);
4933 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4934 if (reg != 0)
4936 s390_load_address (reg, new_rtx);
4937 new_rtx = reg;
4939 break;
4941 case TLS_MODEL_INITIAL_EXEC:
4942 if (flag_pic == 1)
4944 /* Assume GOT offset < 4k. This is handled the same way
4945 in both 31- and 64-bit code. */
4947 if (reload_in_progress || reload_completed)
4948 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4950 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4951 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4952 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4953 new_rtx = gen_const_mem (Pmode, new_rtx);
4954 temp = gen_reg_rtx (Pmode);
4955 emit_move_insn (temp, new_rtx);
4957 else if (TARGET_CPU_ZARCH)
4959 /* If the GOT offset might be >= 4k, we determine the position
4960 of the GOT entry via a PC-relative LARL. */
4962 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4963 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4964 temp = gen_reg_rtx (Pmode);
4965 emit_move_insn (temp, new_rtx);
4967 new_rtx = gen_const_mem (Pmode, temp);
4968 temp = gen_reg_rtx (Pmode);
4969 emit_move_insn (temp, new_rtx);
4971 else if (flag_pic)
4973 /* If the GOT offset might be >= 4k, we have to load it
4974 from the literal pool. */
4976 if (reload_in_progress || reload_completed)
4977 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4979 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4980 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4981 new_rtx = force_const_mem (Pmode, new_rtx);
4982 temp = gen_reg_rtx (Pmode);
4983 emit_move_insn (temp, new_rtx);
4985 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4986 new_rtx = gen_const_mem (Pmode, new_rtx);
4988 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4989 temp = gen_reg_rtx (Pmode);
4990 emit_insn (gen_rtx_SET (temp, new_rtx));
4992 else
4994 /* In position-dependent code, load the absolute address of
4995 the GOT entry from the literal pool. */
4997 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4998 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4999 new_rtx = force_const_mem (Pmode, new_rtx);
5000 temp = gen_reg_rtx (Pmode);
5001 emit_move_insn (temp, new_rtx);
5003 new_rtx = temp;
5004 new_rtx = gen_const_mem (Pmode, new_rtx);
5005 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5006 temp = gen_reg_rtx (Pmode);
5007 emit_insn (gen_rtx_SET (temp, new_rtx));
5010 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5011 if (reg != 0)
5013 s390_load_address (reg, new_rtx);
5014 new_rtx = reg;
5016 break;
5018 case TLS_MODEL_LOCAL_EXEC:
5019 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5020 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5021 new_rtx = force_const_mem (Pmode, new_rtx);
5022 temp = gen_reg_rtx (Pmode);
5023 emit_move_insn (temp, new_rtx);
5025 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5026 if (reg != 0)
5028 s390_load_address (reg, new_rtx);
5029 new_rtx = reg;
5031 break;
5033 default:
5034 gcc_unreachable ();
5037 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5039 switch (XINT (XEXP (addr, 0), 1))
5041 case UNSPEC_INDNTPOFF:
5042 gcc_assert (TARGET_CPU_ZARCH);
5043 new_rtx = addr;
5044 break;
5046 default:
5047 gcc_unreachable ();
5051 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5052 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5054 new_rtx = XEXP (XEXP (addr, 0), 0);
5055 if (GET_CODE (new_rtx) != SYMBOL_REF)
5056 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5058 new_rtx = legitimize_tls_address (new_rtx, reg);
5059 new_rtx = plus_constant (Pmode, new_rtx,
5060 INTVAL (XEXP (XEXP (addr, 0), 1)));
5061 new_rtx = force_operand (new_rtx, 0);
5064 else
5065 gcc_unreachable (); /* for now ... */
5067 return new_rtx;
5070 /* Emit insns making the address in operands[1] valid for a standard
5071 move to operands[0]. operands[1] is replaced by an address which
5072 should be used instead of the former RTX to emit the move
5073 pattern. */
5075 void
5076 emit_symbolic_move (rtx *operands)
5078 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5080 if (GET_CODE (operands[0]) == MEM)
5081 operands[1] = force_reg (Pmode, operands[1]);
5082 else if (TLS_SYMBOLIC_CONST (operands[1]))
5083 operands[1] = legitimize_tls_address (operands[1], temp);
5084 else if (flag_pic)
5085 operands[1] = legitimize_pic_address (operands[1], temp);
5088 /* Try machine-dependent ways of modifying an illegitimate address X
5089 to be legitimate. If we find one, return the new, valid address.
5091 OLDX is the address as it was before break_out_memory_refs was called.
5092 In some cases it is useful to look at this to decide what needs to be done.
5094 MODE is the mode of the operand pointed to by X.
5096 When -fpic is used, special handling is needed for symbolic references.
5097 See comments by legitimize_pic_address for details. */
5099 static rtx
5100 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5101 machine_mode mode ATTRIBUTE_UNUSED)
5103 rtx constant_term = const0_rtx;
5105 if (TLS_SYMBOLIC_CONST (x))
5107 x = legitimize_tls_address (x, 0);
5109 if (s390_legitimate_address_p (mode, x, FALSE))
5110 return x;
5112 else if (GET_CODE (x) == PLUS
5113 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5114 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5116 return x;
5118 else if (flag_pic)
5120 if (SYMBOLIC_CONST (x)
5121 || (GET_CODE (x) == PLUS
5122 && (SYMBOLIC_CONST (XEXP (x, 0))
5123 || SYMBOLIC_CONST (XEXP (x, 1)))))
5124 x = legitimize_pic_address (x, 0);
5126 if (s390_legitimate_address_p (mode, x, FALSE))
5127 return x;
5130 x = eliminate_constant_term (x, &constant_term);
5132 /* Optimize loading of large displacements by splitting them
5133 into the multiple of 4K and the rest; this allows the
5134 former to be CSE'd if possible.
5136 Don't do this if the displacement is added to a register
5137 pointing into the stack frame, as the offsets will
5138 change later anyway. */
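/* Example of the split: a displacement of 0x12345 becomes
upper = 0x12000 (forced into a register and thus a CSE candidate)
and lower = 0x345, which still fits into the 12-bit displacement
field.  */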
5140 if (GET_CODE (constant_term) == CONST_INT
5141 && !TARGET_LONG_DISPLACEMENT
5142 && !DISP_IN_RANGE (INTVAL (constant_term))
5143 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5145 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5146 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5148 rtx temp = gen_reg_rtx (Pmode);
5149 rtx val = force_operand (GEN_INT (upper), temp);
5150 if (val != temp)
5151 emit_move_insn (temp, val);
5153 x = gen_rtx_PLUS (Pmode, x, temp);
5154 constant_term = GEN_INT (lower);
5157 if (GET_CODE (x) == PLUS)
5159 if (GET_CODE (XEXP (x, 0)) == REG)
5161 rtx temp = gen_reg_rtx (Pmode);
5162 rtx val = force_operand (XEXP (x, 1), temp);
5163 if (val != temp)
5164 emit_move_insn (temp, val);
5166 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5169 else if (GET_CODE (XEXP (x, 1)) == REG)
5171 rtx temp = gen_reg_rtx (Pmode);
5172 rtx val = force_operand (XEXP (x, 0), temp);
5173 if (val != temp)
5174 emit_move_insn (temp, val);
5176 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5180 if (constant_term != const0_rtx)
5181 x = gen_rtx_PLUS (Pmode, x, constant_term);
5183 return x;
5186 /* Try a machine-dependent way of reloading an illegitimate address AD
5187 operand. If we find one, push the reload and return the new address.
5189 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5190 and TYPE is the reload type of the current reload. */
5193 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5194 int opnum, int type)
5196 if (!optimize || TARGET_LONG_DISPLACEMENT)
5197 return NULL_RTX;
5199 if (GET_CODE (ad) == PLUS)
5201 rtx tem = simplify_binary_operation (PLUS, Pmode,
5202 XEXP (ad, 0), XEXP (ad, 1));
5203 if (tem)
5204 ad = tem;
5207 if (GET_CODE (ad) == PLUS
5208 && GET_CODE (XEXP (ad, 0)) == REG
5209 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5210 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5212 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5213 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5214 rtx cst, tem, new_rtx;
5216 cst = GEN_INT (upper);
5217 if (!legitimate_reload_constant_p (cst))
5218 cst = force_const_mem (Pmode, cst);
5220 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5221 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5223 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5224 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5225 opnum, (enum reload_type) type);
5226 return new_rtx;
5229 return NULL_RTX;
5232 /* Emit code to move LEN bytes from SRC to DST. */
5234 bool
5235 s390_expand_movmem (rtx dst, rtx src, rtx len)
5237 /* When tuning for z10 or higher we rely on the Glibc functions to
5238 do the right thing. Only for constant lengths below 64k do we
5239 generate inline code. */
5240 if (s390_tune >= PROCESSOR_2097_Z10
5241 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5242 return false;
5244 /* Expand memcpy for constant length operands without a loop if it
5245 is shorter that way.
5247 With a constant length argument a
5248 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
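/* For instance (a sketch), a constant length of 700 expands into
three MVCs below:
  mvc 0(256,dst),0(src)
  mvc 256(256,dst),256(src)
  mvc 512(188,dst),512(src)  */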
5249 if (GET_CODE (len) == CONST_INT
5250 && INTVAL (len) >= 0
5251 && INTVAL (len) <= 256 * 6
5252 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5254 HOST_WIDE_INT o, l;
5256 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5258 rtx newdst = adjust_address (dst, BLKmode, o);
5259 rtx newsrc = adjust_address (src, BLKmode, o);
5260 emit_insn (gen_movmem_short (newdst, newsrc,
5261 GEN_INT (l > 256 ? 255 : l - 1)));
5265 else if (TARGET_MVCLE)
5267 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5270 else
5272 rtx dst_addr, src_addr, count, blocks, temp;
5273 rtx_code_label *loop_start_label = gen_label_rtx ();
5274 rtx_code_label *loop_end_label = gen_label_rtx ();
5275 rtx_code_label *end_label = gen_label_rtx ();
5276 machine_mode mode;
5278 mode = GET_MODE (len);
5279 if (mode == VOIDmode)
5280 mode = Pmode;
5282 dst_addr = gen_reg_rtx (Pmode);
5283 src_addr = gen_reg_rtx (Pmode);
5284 count = gen_reg_rtx (mode);
5285 blocks = gen_reg_rtx (mode);
5287 convert_move (count, len, 1);
5288 emit_cmp_and_jump_insns (count, const0_rtx,
5289 EQ, NULL_RTX, mode, 1, end_label);
5291 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5292 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5293 dst = change_address (dst, VOIDmode, dst_addr);
5294 src = change_address (src, VOIDmode, src_addr);
5296 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5297 OPTAB_DIRECT);
5298 if (temp != count)
5299 emit_move_insn (count, temp);
5301 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5302 OPTAB_DIRECT);
5303 if (temp != blocks)
5304 emit_move_insn (blocks, temp);
5306 emit_cmp_and_jump_insns (blocks, const0_rtx,
5307 EQ, NULL_RTX, mode, 1, loop_end_label);
5309 emit_label (loop_start_label);
5311 if (TARGET_Z10
5312 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5314 rtx prefetch;
5316 /* Issue a read prefetch for the +3 cache line. */
5317 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5318 const0_rtx, const0_rtx);
5319 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5320 emit_insn (prefetch);
5322 /* Issue a write prefetch for the +3 cache line. */
5323 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5324 const1_rtx, const0_rtx);
5325 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5326 emit_insn (prefetch);
5329 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5330 s390_load_address (dst_addr,
5331 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5332 s390_load_address (src_addr,
5333 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5335 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5336 OPTAB_DIRECT);
5337 if (temp != blocks)
5338 emit_move_insn (blocks, temp);
5340 emit_cmp_and_jump_insns (blocks, const0_rtx,
5341 EQ, NULL_RTX, mode, 1, loop_end_label);
5343 emit_jump (loop_start_label);
5344 emit_label (loop_end_label);
5346 emit_insn (gen_movmem_short (dst, src,
5347 convert_to_mode (Pmode, count, 1)));
5348 emit_label (end_label);
5350 return true;
5353 /* Emit code to set LEN bytes at DST to VAL.
5354 Make use of clrmem if VAL is zero. */
5356 void
5357 s390_expand_setmem (rtx dst, rtx len, rtx val)
5359 const int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5361 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5362 return;
5364 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5366 /* Expand setmem/clrmem for a constant length operand without a
5367 loop if it will be shorter that way.
5368 With a constant length and without pfd argument a
5369 clrmem loop is 32 bytes -> 5.3 * xc
5370 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
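/* A sketch of the two non-loop expansions chosen below: clearing uses
XCs of up to 256 bytes each (e.g. xc 0(256,dst),0(dst)), while
setting memory to a nonzero byte stores that byte once and propagates
it with an overlapping MVC, e.g.
  mvi 0(dst),X'42'
  mvc 1(256,dst),0(dst)
handling 257 bytes per step (the byte value X'42' is illustrative).  */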
5371 if (GET_CODE (len) == CONST_INT
5372 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5373 || INTVAL (len) <= 257 * 3)
5374 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5376 HOST_WIDE_INT o, l;
5378 if (val == const0_rtx)
5379 /* clrmem: emit 256 byte blockwise XCs. */
5380 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5382 rtx newdst = adjust_address (dst, BLKmode, o);
5383 emit_insn (gen_clrmem_short (newdst,
5384 GEN_INT (l > 256 ? 255 : l - 1)));
5386 else
5387 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5388 setting first byte to val and using a 256 byte mvc with one
5389 byte overlap to propagate the byte. */
5390 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5392 rtx newdst = adjust_address (dst, BLKmode, o);
5393 emit_move_insn (adjust_address (dst, QImode, o), val);
5394 if (l > 1)
5396 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5397 emit_insn (gen_movmem_short (newdstp1, newdst,
5398 GEN_INT (l > 257 ? 255 : l - 2)));
5403 else if (TARGET_MVCLE)
5405 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5406 if (TARGET_64BIT)
5407 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5408 val));
5409 else
5410 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5411 val));
5414 else
5416 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5417 rtx_code_label *loop_start_label = gen_label_rtx ();
5418 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5419 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5420 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5421 machine_mode mode;
5423 mode = GET_MODE (len);
5424 if (mode == VOIDmode)
5425 mode = Pmode;
5427 dst_addr = gen_reg_rtx (Pmode);
5428 count = gen_reg_rtx (mode);
5429 blocks = gen_reg_rtx (mode);
5431 convert_move (count, len, 1);
5432 emit_cmp_and_jump_insns (count, const0_rtx,
5433 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5434 very_unlikely);
5436 /* We need to make a copy of the target address since memset is
5437 supposed to return it unmodified. We have to make it here
5438 already since the new reg is used at onebyte_end_label. */
5439 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5440 dst = change_address (dst, VOIDmode, dst_addr);
5442 if (val != const0_rtx)
5444 /* When using the overlapping mvc the original target
5445 address is only accessed as a single byte entity (even by
5446 the mvc reading this value). */
5447 set_mem_size (dst, 1);
5448 dstp1 = adjust_address (dst, VOIDmode, 1);
5449 emit_cmp_and_jump_insns (count,
5450 const1_rtx, EQ, NULL_RTX, mode, 1,
5451 onebyte_end_label, very_unlikely);
5454 /* There is one unconditional (mvi+mvc)/xc after the loop
5455 dealing with the rest of the bytes; subtracting two (mvi+mvc)
5456 or one (xc) here leaves that number of bytes to be handled by
5457 it. */
5458 temp = expand_binop (mode, add_optab, count,
5459 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5460 count, 1, OPTAB_DIRECT);
5461 if (temp != count)
5462 emit_move_insn (count, temp);
5464 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5465 OPTAB_DIRECT);
5466 if (temp != blocks)
5467 emit_move_insn (blocks, temp);
5469 emit_cmp_and_jump_insns (blocks, const0_rtx,
5470 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5472 emit_jump (loop_start_label);
5474 if (val != const0_rtx)
5476 /* The 1 byte != 0 special case. Not handled efficiently
5477 since we require two jumps for that. However, this
5478 should be very rare. */
5479 emit_label (onebyte_end_label);
5480 emit_move_insn (adjust_address (dst, QImode, 0), val);
5481 emit_jump (zerobyte_end_label);
5484 emit_label (loop_start_label);
5486 if (TARGET_Z10
5487 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5489 /* Issue a write prefetch for the +4 cache line. */
5490 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5491 GEN_INT (1024)),
5492 const1_rtx, const0_rtx);
5493 emit_insn (prefetch);
5494 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5497 if (val == const0_rtx)
5498 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5499 else
5501 /* Set the first byte in the block to the value and use an
5502 overlapping mvc for the block. */
5503 emit_move_insn (adjust_address (dst, QImode, 0), val);
5504 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5506 s390_load_address (dst_addr,
5507 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5509 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5510 OPTAB_DIRECT);
5511 if (temp != blocks)
5512 emit_move_insn (blocks, temp);
5514 emit_cmp_and_jump_insns (blocks, const0_rtx,
5515 NE, NULL_RTX, mode, 1, loop_start_label);
5517 emit_label (restbyte_end_label);
5519 if (val == const0_rtx)
5520 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5521 else
5523 /* Set the first byte in the block to the value and use an
5524 overlapping mvc for the block. */
5525 emit_move_insn (adjust_address (dst, QImode, 0), val);
5526 /* execute only uses the lowest 8 bits of count, which is
5527 exactly what we need here. */
5528 emit_insn (gen_movmem_short (dstp1, dst,
5529 convert_to_mode (Pmode, count, 1)));
5532 emit_label (zerobyte_end_label);
5536 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5537 and return the result in TARGET. */
5539 bool
5540 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5542 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5543 rtx tmp;
5545 /* When tuning for z10 or higher we rely on the Glibc functions to
5546 do the right thing. Only for constant lengths below 64k do we
5547 generate inline code. */
5548 if (s390_tune >= PROCESSOR_2097_Z10
5549 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5550 return false;
5552 /* As the result of CMPINT is inverted compared to what we need,
5553 we have to swap the operands. */
5554 tmp = op0; op0 = op1; op1 = tmp;
5556 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5558 if (INTVAL (len) > 0)
5560 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5561 emit_insn (gen_cmpint (target, ccreg));
5563 else
5564 emit_move_insn (target, const0_rtx);
5566 else if (TARGET_MVCLE)
5568 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5569 emit_insn (gen_cmpint (target, ccreg));
5571 else
5573 rtx addr0, addr1, count, blocks, temp;
5574 rtx_code_label *loop_start_label = gen_label_rtx ();
5575 rtx_code_label *loop_end_label = gen_label_rtx ();
5576 rtx_code_label *end_label = gen_label_rtx ();
5577 machine_mode mode;
5579 mode = GET_MODE (len);
5580 if (mode == VOIDmode)
5581 mode = Pmode;
5583 addr0 = gen_reg_rtx (Pmode);
5584 addr1 = gen_reg_rtx (Pmode);
5585 count = gen_reg_rtx (mode);
5586 blocks = gen_reg_rtx (mode);
5588 convert_move (count, len, 1);
5589 emit_cmp_and_jump_insns (count, const0_rtx,
5590 EQ, NULL_RTX, mode, 1, end_label);
5592 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5593 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5594 op0 = change_address (op0, VOIDmode, addr0);
5595 op1 = change_address (op1, VOIDmode, addr1);
5597 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5598 OPTAB_DIRECT);
5599 if (temp != count)
5600 emit_move_insn (count, temp);
5602 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5603 OPTAB_DIRECT);
5604 if (temp != blocks)
5605 emit_move_insn (blocks, temp);
5607 emit_cmp_and_jump_insns (blocks, const0_rtx,
5608 EQ, NULL_RTX, mode, 1, loop_end_label);
5610 emit_label (loop_start_label);
5612 if (TARGET_Z10
5613 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5615 rtx prefetch;
5617 /* Issue a read prefetch for the +2 cache line of operand 1. */
5618 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5619 const0_rtx, const0_rtx);
5620 emit_insn (prefetch);
5621 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5623 /* Issue a read prefetch for the +2 cache line of operand 2. */
5624 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5625 const0_rtx, const0_rtx);
5626 emit_insn (prefetch);
5627 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5630 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5631 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5632 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5633 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5634 temp = gen_rtx_SET (pc_rtx, temp);
5635 emit_jump_insn (temp);
5637 s390_load_address (addr0,
5638 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5639 s390_load_address (addr1,
5640 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5642 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5643 OPTAB_DIRECT);
5644 if (temp != blocks)
5645 emit_move_insn (blocks, temp);
5647 emit_cmp_and_jump_insns (blocks, const0_rtx,
5648 EQ, NULL_RTX, mode, 1, loop_end_label);
5650 emit_jump (loop_start_label);
5651 emit_label (loop_end_label);
5653 emit_insn (gen_cmpmem_short (op0, op1,
5654 convert_to_mode (Pmode, count, 1)));
5655 emit_label (end_label);
5657 emit_insn (gen_cmpint (target, ccreg));
5659 return true;
5662 /* Emit a conditional jump to LABEL for condition code mask MASK using
5663 comparison operator COMPARISON. Return the emitted jump insn. */
5665 static rtx_insn *
5666 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5668 rtx temp;
5670 gcc_assert (comparison == EQ || comparison == NE);
5671 gcc_assert (mask > 0 && mask < 15);
5673 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5674 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5675 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5676 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5677 temp = gen_rtx_SET (pc_rtx, temp);
5678 return emit_jump_insn (temp);
5681 /* Emit the instructions to implement strlen of STRING and store the
5682 result in TARGET. The string has the known ALIGNMENT. This
5683 version uses vector instructions and is therefore not appropriate
5684 for targets prior to z13. */
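/* Rough outline of the expansion below: the potentially unaligned
head of the string is loaded with vll (never crossing a 16-byte
boundary), the remainder is processed in a loop of 16-byte aligned
vector loads, each chunk is scanned for a zero byte with vfene
(zero-search form), and the byte index it reports is combined with
the running string index to form the final length.  */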
5686 void
5687 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5689 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5690 int very_likely = REG_BR_PROB_BASE - 1;
5691 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5692 rtx str_reg = gen_reg_rtx (V16QImode);
5693 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5694 rtx str_idx_reg = gen_reg_rtx (Pmode);
5695 rtx result_reg = gen_reg_rtx (V16QImode);
5696 rtx is_aligned_label = gen_label_rtx ();
5697 rtx into_loop_label = NULL_RTX;
5698 rtx loop_start_label = gen_label_rtx ();
5699 rtx temp;
5700 rtx len = gen_reg_rtx (QImode);
5701 rtx cond;
5703 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5704 emit_move_insn (str_idx_reg, const0_rtx);
5706 if (INTVAL (alignment) < 16)
5708 /* Check whether the address happens to be aligned properly so
5709 jump directly to the aligned loop. */
5710 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5711 str_addr_base_reg, GEN_INT (15)),
5712 const0_rtx, EQ, NULL_RTX,
5713 Pmode, 1, is_aligned_label);
5715 temp = gen_reg_rtx (Pmode);
5716 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5717 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5718 gcc_assert (REG_P (temp));
5719 highest_index_to_load_reg =
5720 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5721 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5722 gcc_assert (REG_P (highest_index_to_load_reg));
5723 emit_insn (gen_vllv16qi (str_reg,
5724 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5725 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5727 into_loop_label = gen_label_rtx ();
5728 s390_emit_jump (into_loop_label, NULL_RTX);
5729 emit_barrier ();
5732 emit_label (is_aligned_label);
5733 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5735 /* Reaching this point we are only performing 16-byte aligned
5736 loads. */
5737 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5739 emit_label (loop_start_label);
5740 LABEL_NUSES (loop_start_label) = 1;
5742 /* Load 16 bytes of the string into VR. */
5743 emit_move_insn (str_reg,
5744 gen_rtx_MEM (V16QImode,
5745 gen_rtx_PLUS (Pmode, str_idx_reg,
5746 str_addr_base_reg)));
5747 if (into_loop_label != NULL_RTX)
5749 emit_label (into_loop_label);
5750 LABEL_NUSES (into_loop_label) = 1;
5753 /* Increment string index by 16 bytes. */
5754 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5755 str_idx_reg, 1, OPTAB_DIRECT);
5757 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5758 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5760 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5761 REG_BR_PROB, very_likely);
5762 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5764 /* If the string pointer wasn't aligned we have loaded less than 16
5765 bytes and the remaining bytes got filled with zeros (by vll).
5766 Now we have to check whether the resulting index lies within the
5767 bytes actually part of the string. */
5769 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5770 highest_index_to_load_reg);
5771 s390_load_address (highest_index_to_load_reg,
5772 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5773 const1_rtx));
5774 if (TARGET_64BIT)
5775 emit_insn (gen_movdicc (str_idx_reg, cond,
5776 highest_index_to_load_reg, str_idx_reg));
5777 else
5778 emit_insn (gen_movsicc (str_idx_reg, cond,
5779 highest_index_to_load_reg, str_idx_reg));
5781 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5782 very_unlikely);
5784 expand_binop (Pmode, add_optab, str_idx_reg,
5785 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5786 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5787 here. */
5788 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5789 convert_to_mode (Pmode, len, 1),
5790 target, 1, OPTAB_DIRECT);
5791 if (temp != target)
5792 emit_move_insn (target, temp);
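/* Expand a vector (z13 and later) implementation of movstr: copy the
zero-terminated string at SRC to DST and set RESULT to DST plus the
index of the terminating zero, i.e. the address of the copied zero
byte.  */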
5795 void
5796 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5798 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5799 rtx temp = gen_reg_rtx (Pmode);
5800 rtx src_addr = XEXP (src, 0);
5801 rtx dst_addr = XEXP (dst, 0);
5802 rtx src_addr_reg = gen_reg_rtx (Pmode);
5803 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5804 rtx offset = gen_reg_rtx (Pmode);
5805 rtx vsrc = gen_reg_rtx (V16QImode);
5806 rtx vpos = gen_reg_rtx (V16QImode);
5807 rtx loadlen = gen_reg_rtx (SImode);
5808 rtx gpos_qi = gen_reg_rtx(QImode);
5809 rtx gpos = gen_reg_rtx (SImode);
5810 rtx done_label = gen_label_rtx ();
5811 rtx loop_label = gen_label_rtx ();
5812 rtx exit_label = gen_label_rtx ();
5813 rtx full_label = gen_label_rtx ();
5815 /* Perform a quick check for a string ending within the first (up to)
5816 16 bytes and exit early if successful. */
5818 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5819 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5820 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5821 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5822 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5823 /* gpos is the byte index if a zero was found and 16 otherwise.
5824 So if it is lower than the number of loaded bytes we have a hit. */
5825 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5826 full_label);
5827 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5829 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5830 1, OPTAB_DIRECT);
5831 emit_jump (exit_label);
5832 emit_barrier ();
5834 emit_label (full_label);
5835 LABEL_NUSES (full_label) = 1;
5837 /* Calculate `offset' so that src + offset points to the last byte
5838 before 16 byte alignment. */
5840 /* temp = src_addr & 0xf */
5841 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5842 1, OPTAB_DIRECT);
5844 /* offset = 0xf - temp */
5845 emit_move_insn (offset, GEN_INT (15));
5846 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5847 1, OPTAB_DIRECT);
5849 /* Store `offset' bytes in the destination string. The quick check
5850 has loaded at least `offset' bytes into vsrc. */
5852 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5854 /* Advance to the next byte to be loaded. */
5855 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5856 1, OPTAB_DIRECT);
5858 /* Make sure the addresses are single regs which can be used as a
5859 base. */
5860 emit_move_insn (src_addr_reg, src_addr);
5861 emit_move_insn (dst_addr_reg, dst_addr);
5863 /* MAIN LOOP */
5865 emit_label (loop_label);
5866 LABEL_NUSES (loop_label) = 1;
5868 emit_move_insn (vsrc,
5869 gen_rtx_MEM (V16QImode,
5870 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5872 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5873 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5874 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5875 REG_BR_PROB, very_unlikely);
5877 emit_move_insn (gen_rtx_MEM (V16QImode,
5878 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5879 vsrc);
5880 /* offset += 16 */
5881 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5882 offset, 1, OPTAB_DIRECT);
5884 emit_jump (loop_label);
5885 emit_barrier ();
5887 /* REGULAR EXIT */
5889 /* We are done. Add the offset of the zero character to the dst_addr
5890 pointer to get the result. */
5892 emit_label (done_label);
5893 LABEL_NUSES (done_label) = 1;
5895 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5896 1, OPTAB_DIRECT);
5898 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5899 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5901 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5903 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5904 1, OPTAB_DIRECT);
5906 /* EARLY EXIT */
5908 emit_label (exit_label);
5909 LABEL_NUSES (exit_label) = 1;
5913 /* Expand conditional increment or decrement using alc/slb instructions.
5914 Should generate code setting DST to either SRC or SRC + INCREMENT,
5915 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5916 Returns true if successful, false otherwise.
5918 That makes it possible to implement some if-constructs without jumps e.g.:
5919 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5920 unsigned int a, b, c;
5921 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5922 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5923 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5924 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5926 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5927 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5928 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5929 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5930 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
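/* A concrete sketch of the ALC case (if (a < b) c++;): the code below
first emits (set (reg:CCU CC_REGNUM) (compare:CCU b a)) and then a
PARALLEL setting c to
  (plus (plus (gtu (reg:CCU CC_REGNUM) (const_int 0)) c) (const_int 0))
with a CC clobber, intended to be matched by the
add-logical-with-carry patterns in s390.md.  */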
5932 bool
5933 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5934 rtx dst, rtx src, rtx increment)
5936 machine_mode cmp_mode;
5937 machine_mode cc_mode;
5938 rtx op_res;
5939 rtx insn;
5940 rtvec p;
5941 int ret;
5943 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5944 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5945 cmp_mode = SImode;
5946 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5947 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5948 cmp_mode = DImode;
5949 else
5950 return false;
5952 /* Try ADD LOGICAL WITH CARRY. */
5953 if (increment == const1_rtx)
5955 /* Determine CC mode to use. */
5956 if (cmp_code == EQ || cmp_code == NE)
5958 if (cmp_op1 != const0_rtx)
5960 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5961 NULL_RTX, 0, OPTAB_WIDEN);
5962 cmp_op1 = const0_rtx;
5965 cmp_code = cmp_code == EQ ? LEU : GTU;
5968 if (cmp_code == LTU || cmp_code == LEU)
5970 rtx tem = cmp_op0;
5971 cmp_op0 = cmp_op1;
5972 cmp_op1 = tem;
5973 cmp_code = swap_condition (cmp_code);
5976 switch (cmp_code)
5978 case GTU:
5979 cc_mode = CCUmode;
5980 break;
5982 case GEU:
5983 cc_mode = CCL3mode;
5984 break;
5986 default:
5987 return false;
5990 /* Emit comparison instruction pattern. */
5991 if (!register_operand (cmp_op0, cmp_mode))
5992 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5994 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5995 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5996 /* We use insn_invalid_p here to add clobbers if required. */
5997 ret = insn_invalid_p (emit_insn (insn), false);
5998 gcc_assert (!ret);
6000 /* Emit ALC instruction pattern. */
6001 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6002 gen_rtx_REG (cc_mode, CC_REGNUM),
6003 const0_rtx);
6005 if (src != const0_rtx)
6007 if (!register_operand (src, GET_MODE (dst)))
6008 src = force_reg (GET_MODE (dst), src);
6010 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6011 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6014 p = rtvec_alloc (2);
6015 RTVEC_ELT (p, 0) =
6016 gen_rtx_SET (dst, op_res);
6017 RTVEC_ELT (p, 1) =
6018 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6019 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6021 return true;
6024 /* Try SUBTRACT LOGICAL WITH BORROW. */
6025 if (increment == constm1_rtx)
6027 /* Determine CC mode to use. */
6028 if (cmp_code == EQ || cmp_code == NE)
6030 if (cmp_op1 != const0_rtx)
6032 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6033 NULL_RTX, 0, OPTAB_WIDEN);
6034 cmp_op1 = const0_rtx;
6037 cmp_code = cmp_code == EQ ? LEU : GTU;
6040 if (cmp_code == GTU || cmp_code == GEU)
6042 rtx tem = cmp_op0;
6043 cmp_op0 = cmp_op1;
6044 cmp_op1 = tem;
6045 cmp_code = swap_condition (cmp_code);
6048 switch (cmp_code)
6050 case LEU:
6051 cc_mode = CCUmode;
6052 break;
6054 case LTU:
6055 cc_mode = CCL3mode;
6056 break;
6058 default:
6059 return false;
6062 /* Emit comparison instruction pattern. */
6063 if (!register_operand (cmp_op0, cmp_mode))
6064 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6066 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6067 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6068 /* We use insn_invalid_p here to add clobbers if required. */
6069 ret = insn_invalid_p (emit_insn (insn), false);
6070 gcc_assert (!ret);
6072 /* Emit SLB instruction pattern. */
6073 if (!register_operand (src, GET_MODE (dst)))
6074 src = force_reg (GET_MODE (dst), src);
6076 op_res = gen_rtx_MINUS (GET_MODE (dst),
6077 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6078 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6079 gen_rtx_REG (cc_mode, CC_REGNUM),
6080 const0_rtx));
6081 p = rtvec_alloc (2);
6082 RTVEC_ELT (p, 0) =
6083 gen_rtx_SET (dst, op_res);
6084 RTVEC_ELT (p, 1) =
6085 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6086 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6088 return true;
6091 return false;
6094 /* Expand code for the insv template. Return true if successful. */
6096 bool
6097 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6099 int bitsize = INTVAL (op1);
6100 int bitpos = INTVAL (op2);
6101 machine_mode mode = GET_MODE (dest);
6102 machine_mode smode;
6103 int smode_bsize, mode_bsize;
6104 rtx op, clobber;
6106 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6107 return false;
6109 /* Generate INSERT IMMEDIATE (IILL et al). */
6110 /* (set (ze (reg)) (const_int)). */
6111 if (TARGET_ZARCH
6112 && register_operand (dest, word_mode)
6113 && (bitpos % 16) == 0
6114 && (bitsize % 16) == 0
6115 && const_int_operand (src, VOIDmode))
6117 HOST_WIDE_INT val = INTVAL (src);
6118 int regpos = bitpos + bitsize;
6120 while (regpos > bitpos)
6122 machine_mode putmode;
6123 int putsize;
6125 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6126 putmode = SImode;
6127 else
6128 putmode = HImode;
6130 putsize = GET_MODE_BITSIZE (putmode);
6131 regpos -= putsize;
6132 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6133 GEN_INT (putsize),
6134 GEN_INT (regpos)),
6135 gen_int_mode (val, putmode));
6136 val >>= putsize;
6138 gcc_assert (regpos == bitpos);
6139 return true;
6142 smode = smallest_mode_for_size (bitsize, MODE_INT);
6143 smode_bsize = GET_MODE_BITSIZE (smode);
6144 mode_bsize = GET_MODE_BITSIZE (mode);
6146 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6147 if (bitpos == 0
6148 && (bitsize % BITS_PER_UNIT) == 0
6149 && MEM_P (dest)
6150 && (register_operand (src, word_mode)
6151 || const_int_operand (src, VOIDmode)))
6153 /* Emit standard pattern if possible. */
6154 if (smode_bsize == bitsize)
6156 emit_move_insn (adjust_address (dest, smode, 0),
6157 gen_lowpart (smode, src));
6158 return true;
6161 /* (set (ze (mem)) (const_int)). */
6162 else if (const_int_operand (src, VOIDmode))
6164 int size = bitsize / BITS_PER_UNIT;
6165 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6166 BLKmode,
6167 UNITS_PER_WORD - size);
6169 dest = adjust_address (dest, BLKmode, 0);
6170 set_mem_size (dest, size);
6171 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6172 return true;
6175 /* (set (ze (mem)) (reg)). */
6176 else if (register_operand (src, word_mode))
6178 if (bitsize <= 32)
6179 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6180 const0_rtx), src);
6181 else
6183 /* Emit st,stcmh sequence. */
6184 int stcmh_width = bitsize - 32;
6185 int size = stcmh_width / BITS_PER_UNIT;
6187 emit_move_insn (adjust_address (dest, SImode, size),
6188 gen_lowpart (SImode, src));
6189 set_mem_size (dest, size);
6190 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6191 GEN_INT (stcmh_width),
6192 const0_rtx),
6193 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6195 return true;
6199 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6200 if ((bitpos % BITS_PER_UNIT) == 0
6201 && (bitsize % BITS_PER_UNIT) == 0
6202 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6203 && MEM_P (src)
6204 && (mode == DImode || mode == SImode)
6205 && register_operand (dest, mode))
6207 /* Emit a strict_low_part pattern if possible. */
6208 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6210 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6211 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6212 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6213 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6214 return true;
6217 /* ??? There are more powerful versions of ICM that are not
6218 completely represented in the md file. */
6221 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6222 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6224 machine_mode mode_s = GET_MODE (src);
6226 if (CONSTANT_P (src))
6228 /* For constant zero values the representation with AND
6229 appears to be folded in more situations than the (set
6230 (zero_extract) ...).
6231 We only do this when the start and end of the bitfield
6232 remain in the same SImode chunk. That way nihf or nilf
6233 can be used.
6234 The AND patterns might still generate a risbg for this. */
6235 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6236 return false;
6237 else
6238 src = force_reg (mode, src);
6240 else if (mode_s != mode)
6242 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6243 src = force_reg (mode_s, src);
6244 src = gen_lowpart (mode, src);
6247 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6248 op = gen_rtx_SET (op, src);
6250 if (!TARGET_ZEC12)
6252 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6253 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6255 emit_insn (op);
6257 return true;
6260 return false;
6263 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6264 register that holds VAL of mode MODE shifted by COUNT bits. */
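/* Worked example (illustrative): for a QImode VAL whose SImode register
   contains 0x000000ab and COUNT == 16, the returned register holds
   (0xab << 16) == 0x00ab0000.  */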
6266 static inline rtx
6267 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6269 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6270 NULL_RTX, 1, OPTAB_DIRECT);
6271 return expand_simple_binop (SImode, ASHIFT, val, count,
6272 NULL_RTX, 1, OPTAB_DIRECT);
6275 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6276 the result in TARGET. */
6278 void
6279 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6280 rtx cmp_op1, rtx cmp_op2)
6282 machine_mode mode = GET_MODE (target);
6283 bool neg_p = false, swap_p = false;
6284 rtx tmp;
6286 if (GET_MODE (cmp_op1) == V2DFmode)
6288 switch (cond)
6290 /* NE a != b -> !(a == b) */
6291 case NE: cond = EQ; neg_p = true; break;
6292 /* UNGT a u> b -> !(b >= a) */
6293 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6294 /* UNGE a u>= b -> !(b > a) */
6295 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6296 /* LE: a <= b -> b >= a */
6297 case LE: cond = GE; swap_p = true; break;
6298 /* UNLE: a u<= b -> !(a > b) */
6299 case UNLE: cond = GT; neg_p = true; break;
6300 /* LT: a < b -> b > a */
6301 case LT: cond = GT; swap_p = true; break;
6302 /* UNLT: a u< b -> !(a >= b) */
6303 case UNLT: cond = GE; neg_p = true; break;
6304 case UNEQ:
6305 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6306 return;
6307 case LTGT:
6308 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6309 return;
6310 case ORDERED:
6311 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6312 return;
6313 case UNORDERED:
6314 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6315 return;
6316 default: break;
6319 else
6321 switch (cond)
6323 /* NE: a != b -> !(a == b) */
6324 case NE: cond = EQ; neg_p = true; break;
6325 /* GE: a >= b -> !(b > a) */
6326 case GE: cond = GT; neg_p = true; swap_p = true; break;
6327 /* GEU: a >= b -> !(b > a) */
6328 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6329 /* LE: a <= b -> !(a > b) */
6330 case LE: cond = GT; neg_p = true; break;
6331 /* LEU: a <= b -> !(a > b) */
6332 case LEU: cond = GTU; neg_p = true; break;
6333 /* LT: a < b -> b > a */
6334 case LT: cond = GT; swap_p = true; break;
6335 /* LTU: a < b -> b > a */
6336 case LTU: cond = GTU; swap_p = true; break;
6337 default: break;
6341 if (swap_p)
6343 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6346 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6347 mode,
6348 cmp_op1, cmp_op2)));
6349 if (neg_p)
6350 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6353 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6354 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6355 elements in CMP1 and CMP2 fulfill the comparison.
6356 This function is only used to emit patterns for the vx builtins and
6357 therefore only handles comparison codes required by the
6358 builtins. */
6359 void
6360 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6361 rtx cmp1, rtx cmp2, bool all_p)
6363 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6364 rtx tmp_reg = gen_reg_rtx (SImode);
6365 bool swap_p = false;
6367 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6369 switch (code)
6371 case EQ:
6372 case NE:
6373 cc_producer_mode = CCVEQmode;
6374 break;
6375 case GE:
6376 case LT:
6377 code = swap_condition (code);
6378 swap_p = true;
6379 /* fallthrough */
6380 case GT:
6381 case LE:
6382 cc_producer_mode = CCVIHmode;
6383 break;
6384 case GEU:
6385 case LTU:
6386 code = swap_condition (code);
6387 swap_p = true;
6388 /* fallthrough */
6389 case GTU:
6390 case LEU:
6391 cc_producer_mode = CCVIHUmode;
6392 break;
6393 default:
6394 gcc_unreachable ();
6397 scratch_mode = GET_MODE (cmp1);
6398 /* These codes represent inverted CC interpretations. Inverting
6399 an ALL CC mode results in an ANY CC mode and the other way
6400 around. Invert the all_p flag here to compensate for
6401 that. */
6402 if (code == NE || code == LE || code == LEU)
6403 all_p = !all_p;
6405 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6407 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6409 bool inv_p = false;
6411 switch (code)
6413 case EQ: cc_producer_mode = CCVEQmode; break;
6414 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6415 case GT: cc_producer_mode = CCVFHmode; break;
6416 case GE: cc_producer_mode = CCVFHEmode; break;
6417 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6418 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6419 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6420 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6421 default: gcc_unreachable ();
6423 scratch_mode = mode_for_vector (
6424 int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
6425 GET_MODE_NUNITS (GET_MODE (cmp1)));
6426 gcc_assert (scratch_mode != BLKmode);
6428 if (inv_p)
6429 all_p = !all_p;
6431 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6433 else
6434 gcc_unreachable ();
6436 if (swap_p)
6438 rtx tmp = cmp2;
6439 cmp2 = cmp1;
6440 cmp1 = tmp;
6443 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6444 gen_rtvec (2, gen_rtx_SET (
6445 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6446 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6447 gen_rtx_CLOBBER (VOIDmode,
6448 gen_rtx_SCRATCH (scratch_mode)))));
6449 emit_move_insn (target, const0_rtx);
6450 emit_move_insn (tmp_reg, const1_rtx);
6452 emit_move_insn (target,
6453 gen_rtx_IF_THEN_ELSE (SImode,
6454 gen_rtx_fmt_ee (code, VOIDmode,
6455 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6456 const0_rtx),
6457 tmp_reg, target));
6460 /* Invert the comparison CODE applied to a CC mode. This is only safe
6461 if we know whether the result was created by a floating point
6462 compare or not. For the CCV modes this is encoded as part of the
6463 mode. */
6464 enum rtx_code
6465 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6467 /* Reversal of FP compares needs care -- an ordered compare
6468 becomes an unordered compare and vice versa. */
6469 if (mode == CCVFALLmode || mode == CCVFANYmode)
6470 return reverse_condition_maybe_unordered (code);
6471 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6472 return reverse_condition (code);
6473 else
6474 gcc_unreachable ();
6477 /* Generate a vector comparison expression loading either elements of
6478 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6479 and CMP_OP2. */
6481 void
6482 s390_expand_vcond (rtx target, rtx then, rtx els,
6483 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6485 rtx tmp;
6486 machine_mode result_mode;
6487 rtx result_target;
6489 machine_mode target_mode = GET_MODE (target);
6490 machine_mode cmp_mode = GET_MODE (cmp_op1);
6491 rtx op = (cond == LT) ? els : then;
6493 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6494 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6495 for short and byte (x >> 15 and x >> 7 respectively). */
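/* Worked example (illustrative): for a V4SI element 0x80000000, an
   arithmetic shift right by 31 yields 0xffffffff (the -1 case), while a
   logical shift right by 31 yields 0x00000001 (the 1 case).  */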
6496 if ((cond == LT || cond == GE)
6497 && target_mode == cmp_mode
6498 && cmp_op2 == CONST0_RTX (cmp_mode)
6499 && op == CONST0_RTX (target_mode)
6500 && s390_vector_mode_supported_p (target_mode)
6501 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6503 rtx negop = (cond == LT) ? then : els;
6505 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6507 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6508 if (negop == CONST1_RTX (target_mode))
6510 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6511 GEN_INT (shift), target,
6512 1, OPTAB_DIRECT);
6513 if (res != target)
6514 emit_move_insn (target, res);
6515 return;
6518 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6519 else if (all_ones_operand (negop, target_mode))
6521 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6522 GEN_INT (shift), target,
6523 0, OPTAB_DIRECT);
6524 if (res != target)
6525 emit_move_insn (target, res);
6526 return;
6530 /* We always use an integral type vector to hold the comparison
6531 result. */
6532 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6533 result_target = gen_reg_rtx (result_mode);
6535 /* We allow vector immediates as comparison operands that
6536 can be handled by the optimization above but not by the
6537 following code. Hence, force them into registers here. */
6538 if (!REG_P (cmp_op1))
6539 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6541 if (!REG_P (cmp_op2))
6542 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6544 s390_expand_vec_compare (result_target, cond,
6545 cmp_op1, cmp_op2);
6547 /* If the results are supposed to be either -1 or 0 we are done
6548 since this is what our compare instructions generate anyway. */
6549 if (all_ones_operand (then, GET_MODE (then))
6550 && const0_operand (els, GET_MODE (els)))
6552 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6553 result_target, 0));
6554 return;
6557 /* Otherwise we will do a vsel afterwards. */
6558 /* This gets triggered e.g.
6559 with gcc.c-torture/compile/pr53410-1.c */
6560 if (!REG_P (then))
6561 then = force_reg (target_mode, then);
6563 if (!REG_P (els))
6564 els = force_reg (target_mode, els);
6566 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6567 result_target,
6568 CONST0_RTX (result_mode));
6570 /* We compared the result against zero above so we have to swap then
6571 and els here. */
6572 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6574 gcc_assert (target_mode == GET_MODE (then));
6575 emit_insn (gen_rtx_SET (target, tmp));
6578 /* Emit the RTX necessary to initialize the vector TARGET with values
6579 in VALS. */
6580 void
6581 s390_expand_vec_init (rtx target, rtx vals)
6583 machine_mode mode = GET_MODE (target);
6584 machine_mode inner_mode = GET_MODE_INNER (mode);
6585 int n_elts = GET_MODE_NUNITS (mode);
6586 bool all_same = true, all_regs = true, all_const_int = true;
6587 rtx x;
6588 int i;
6590 for (i = 0; i < n_elts; ++i)
6592 x = XVECEXP (vals, 0, i);
6594 if (!CONST_INT_P (x))
6595 all_const_int = false;
6597 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6598 all_same = false;
6600 if (!REG_P (x))
6601 all_regs = false;
6604 /* Use vector gen mask or vector gen byte mask if possible. */
6605 if (all_same && all_const_int
6606 && (XVECEXP (vals, 0, 0) == const0_rtx
6607 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6608 NULL, NULL)
6609 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6611 emit_insn (gen_rtx_SET (target,
6612 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6613 return;
6616 if (all_same)
6618 emit_insn (gen_rtx_SET (target,
6619 gen_rtx_VEC_DUPLICATE (mode,
6620 XVECEXP (vals, 0, 0))));
6621 return;
6624 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6626 /* Use vector load pair. */
6627 emit_insn (gen_rtx_SET (target,
6628 gen_rtx_VEC_CONCAT (mode,
6629 XVECEXP (vals, 0, 0),
6630 XVECEXP (vals, 0, 1))));
6631 return;
6634 /* We are about to set the vector elements one by one. Zero out the
6635 full register first in order to help the data flow framework to
6636 detect it as full VR set. */
6637 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6639 /* Unfortunately the vec_init expander is not allowed to fail. So
6640 we have to implement the fallback ourselves. */
6641 for (i = 0; i < n_elts; i++)
6643 rtx elem = XVECEXP (vals, 0, i);
6644 if (!general_operand (elem, GET_MODE (elem)))
6645 elem = force_reg (inner_mode, elem);
6647 emit_insn (gen_rtx_SET (target,
6648 gen_rtx_UNSPEC (mode,
6649 gen_rtvec (3, elem,
6650 GEN_INT (i), target),
6651 UNSPEC_VEC_SET)));
6655 /* Structure to hold the initial parameters for a compare_and_swap operation
6656 in HImode and QImode. */
6658 struct alignment_context
6660 rtx memsi; /* SI aligned memory location. */
6661 rtx shift; /* Bit offset with regard to lsb. */
6662 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6663 rtx modemaski; /* ~modemask */
6664 bool aligned; /* True if memory is aligned, false otherwise. */
6667 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6668 the structure AC for transparent simplification if the memory alignment is known
6669 to be at least 32 bits. MEM is the memory location for the actual operation
6670 and MODE its mode. */
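/* Worked example (illustrative, using the big-endian byte numbering of
   S/390): for a QImode MEM whose address ADDR satisfies (ADDR & 3) == 1,
   MEMSI covers ADDR & -4 and SHIFT becomes ((4 - 1) - 1) * 8 == 16, i.e.
   the byte occupies bits 23..16 of the SImode word.  */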
6672 static void
6673 init_alignment_context (struct alignment_context *ac, rtx mem,
6674 machine_mode mode)
6676 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6677 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6679 if (ac->aligned)
6680 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6681 else
6683 /* Alignment is unknown. */
6684 rtx byteoffset, addr, align;
6686 /* Force the address into a register. */
6687 addr = force_reg (Pmode, XEXP (mem, 0));
6689 /* Align it to SImode. */
6690 align = expand_simple_binop (Pmode, AND, addr,
6691 GEN_INT (-GET_MODE_SIZE (SImode)),
6692 NULL_RTX, 1, OPTAB_DIRECT);
6693 /* Generate MEM. */
6694 ac->memsi = gen_rtx_MEM (SImode, align);
6695 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6696 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6697 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6699 /* Calculate shiftcount. */
6700 byteoffset = expand_simple_binop (Pmode, AND, addr,
6701 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6702 NULL_RTX, 1, OPTAB_DIRECT);
6703 /* As we already have some offset, evaluate the remaining distance. */
6704 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6705 NULL_RTX, 1, OPTAB_DIRECT);
6708 /* Shift is the byte count, but we need the bitcount. */
6709 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6710 NULL_RTX, 1, OPTAB_DIRECT);
6712 /* Calculate masks. */
6713 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6714 GEN_INT (GET_MODE_MASK (mode)),
6715 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6716 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6717 NULL_RTX, 1);
6720 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6721 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6722 perform the merge in SEQ2. */
6724 static rtx
6725 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6726 machine_mode mode, rtx val, rtx ins)
6728 rtx tmp;
6730 if (ac->aligned)
6732 start_sequence ();
6733 tmp = copy_to_mode_reg (SImode, val);
6734 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6735 const0_rtx, ins))
6737 *seq1 = NULL;
6738 *seq2 = get_insns ();
6739 end_sequence ();
6740 return tmp;
6742 end_sequence ();
6745 /* Failed to use insv. Generate a two part shift and mask. */
6746 start_sequence ();
6747 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6748 *seq1 = get_insns ();
6749 end_sequence ();
6751 start_sequence ();
6752 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6753 *seq2 = get_insns ();
6754 end_sequence ();
6756 return tmp;
6759 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6760 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6761 value to set if CMP == MEM. */
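/* Sketch of the strategy (editor's note): the containing aligned SImode
   word is loaded once, CMP and NEW_RTX are inserted into copies of it at
   the position computed by init_alignment_context, and a COMPARE AND SWAP
   loop on the full word retries whenever bytes outside MODE have changed
   concurrently.  This is, for example, what a __atomic_compare_exchange
   on a 1- or 2-byte object expands to.  */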
6763 void
6764 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6765 rtx cmp, rtx new_rtx, bool is_weak)
6767 struct alignment_context ac;
6768 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6769 rtx res = gen_reg_rtx (SImode);
6770 rtx_code_label *csloop = NULL, *csend = NULL;
6772 gcc_assert (MEM_P (mem));
6774 init_alignment_context (&ac, mem, mode);
6776 /* Load full word. Subsequent loads are performed by CS. */
6777 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6778 NULL_RTX, 1, OPTAB_DIRECT);
6780 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6781 possible, we try to use insv to make this happen efficiently. If
6782 that fails we'll generate code both inside and outside the loop. */
6783 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6784 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6786 if (seq0)
6787 emit_insn (seq0);
6788 if (seq1)
6789 emit_insn (seq1);
6791 /* Start CS loop. */
6792 if (!is_weak)
6794 /* Begin assuming success. */
6795 emit_move_insn (btarget, const1_rtx);
6797 csloop = gen_label_rtx ();
6798 csend = gen_label_rtx ();
6799 emit_label (csloop);
6802 /* val = "<mem>00..0<mem>"
6803 * cmp = "00..0<cmp>00..0"
6804 * new = "00..0<new>00..0"
6807 emit_insn (seq2);
6808 emit_insn (seq3);
6810 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6811 if (is_weak)
6812 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6813 else
6815 rtx tmp;
6817 /* Jump to end if we're done (likely?). */
6818 s390_emit_jump (csend, cc);
6820 /* Check for changes outside mode, and loop internal if so.
6821 Arrange the moves so that the compare is adjacent to the
6822 branch so that we can generate CRJ. */
6823 tmp = copy_to_reg (val);
6824 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6825 1, OPTAB_DIRECT);
6826 cc = s390_emit_compare (NE, val, tmp);
6827 s390_emit_jump (csloop, cc);
6829 /* Failed. */
6830 emit_move_insn (btarget, const0_rtx);
6831 emit_label (csend);
6834 /* Return the correct part of the bitfield. */
6835 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6836 NULL_RTX, 1, OPTAB_DIRECT), 1);
6839 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6840 and VAL the value to play with. If AFTER is true then store the value
6841 MEM holds after the operation, if AFTER is false then store the value MEM
6842 holds before the operation. If TARGET is zero then discard that value, else
6843 store it to TARGET. */
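/* Example (illustrative): a __atomic_fetch_or on a QImode object arrives
   here with CODE == IOR; the containing word is loaded, the shifted value
   is OR-ed in, and a COMPARE AND SWAP loop publishes the new word,
   retrying as long as a concurrent modification is detected.  */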
6845 void
6846 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6847 rtx target, rtx mem, rtx val, bool after)
6849 struct alignment_context ac;
6850 rtx cmp;
6851 rtx new_rtx = gen_reg_rtx (SImode);
6852 rtx orig = gen_reg_rtx (SImode);
6853 rtx_code_label *csloop = gen_label_rtx ();
6855 gcc_assert (!target || register_operand (target, VOIDmode));
6856 gcc_assert (MEM_P (mem));
6858 init_alignment_context (&ac, mem, mode);
6860 /* Shift val to the correct bit positions.
6861 Preserve "icm", but prevent "ex icm". */
6862 if (!(ac.aligned && code == SET && MEM_P (val)))
6863 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6865 /* Further preparation insns. */
6866 if (code == PLUS || code == MINUS)
6867 emit_move_insn (orig, val);
6868 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6869 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6870 NULL_RTX, 1, OPTAB_DIRECT);
6872 /* Load full word. Subsequent loads are performed by CS. */
6873 cmp = force_reg (SImode, ac.memsi);
6875 /* Start CS loop. */
6876 emit_label (csloop);
6877 emit_move_insn (new_rtx, cmp);
6879 /* Patch new with val at correct position. */
6880 switch (code)
6882 case PLUS:
6883 case MINUS:
6884 val = expand_simple_binop (SImode, code, new_rtx, orig,
6885 NULL_RTX, 1, OPTAB_DIRECT);
6886 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6887 NULL_RTX, 1, OPTAB_DIRECT);
6888 /* FALLTHRU */
6889 case SET:
6890 if (ac.aligned && MEM_P (val))
6891 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6892 0, 0, SImode, val, false);
6893 else
6895 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6896 NULL_RTX, 1, OPTAB_DIRECT);
6897 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6898 NULL_RTX, 1, OPTAB_DIRECT);
6900 break;
6901 case AND:
6902 case IOR:
6903 case XOR:
6904 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6905 NULL_RTX, 1, OPTAB_DIRECT);
6906 break;
6907 case MULT: /* NAND */
6908 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6909 NULL_RTX, 1, OPTAB_DIRECT);
6910 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6911 NULL_RTX, 1, OPTAB_DIRECT);
6912 break;
6913 default:
6914 gcc_unreachable ();
6917 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6918 ac.memsi, cmp, new_rtx));
6920 /* Return the correct part of the bitfield. */
6921 if (target)
6922 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6923 after ? new_rtx : cmp, ac.shift,
6924 NULL_RTX, 1, OPTAB_DIRECT), 1);
6927 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6928 We need to emit DTP-relative relocations. */
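/* For example (illustrative), with SIZE == 8 and X a symbol_ref for "foo"
   this emits "\t.quad\tfoo@DTPOFF".  */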
6930 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6932 static void
6933 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6935 switch (size)
6937 case 4:
6938 fputs ("\t.long\t", file);
6939 break;
6940 case 8:
6941 fputs ("\t.quad\t", file);
6942 break;
6943 default:
6944 gcc_unreachable ();
6946 output_addr_const (file, x);
6947 fputs ("@DTPOFF", file);
6950 /* Return the proper mode for REGNO being represented in the dwarf
6951 unwind table. */
6952 machine_mode
6953 s390_dwarf_frame_reg_mode (int regno)
6955 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6957 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6958 if (GENERAL_REGNO_P (regno))
6959 save_mode = Pmode;
6961 /* The rightmost 64 bits of vector registers are call-clobbered. */
6962 if (GET_MODE_SIZE (save_mode) > 8)
6963 save_mode = DImode;
6965 return save_mode;
6968 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6969 /* Implement TARGET_MANGLE_TYPE. */
6971 static const char *
6972 s390_mangle_type (const_tree type)
6974 type = TYPE_MAIN_VARIANT (type);
6976 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6977 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6978 return NULL;
6980 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6981 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6982 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6983 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6985 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6986 && TARGET_LONG_DOUBLE_128)
6987 return "g";
6989 /* For all other types, use normal C++ mangling. */
6990 return NULL;
6992 #endif
6994 /* In the name of slightly smaller debug output, and to cater to
6995 general assembler lossage, recognize various UNSPEC sequences
6996 and turn them back into a direct symbol reference. */
6998 static rtx
6999 s390_delegitimize_address (rtx orig_x)
7001 rtx x, y;
7003 orig_x = delegitimize_mem_from_attrs (orig_x);
7004 x = orig_x;
7006 /* Extract the symbol ref from:
7007 (plus:SI (reg:SI 12 %r12)
7008 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7009 UNSPEC_GOTOFF/PLTOFF)))
7011 (plus:SI (reg:SI 12 %r12)
7012 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7013 UNSPEC_GOTOFF/PLTOFF)
7014 (const_int 4 [0x4])))) */
7015 if (GET_CODE (x) == PLUS
7016 && REG_P (XEXP (x, 0))
7017 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7018 && GET_CODE (XEXP (x, 1)) == CONST)
7020 HOST_WIDE_INT offset = 0;
7022 /* The const operand. */
7023 y = XEXP (XEXP (x, 1), 0);
7025 if (GET_CODE (y) == PLUS
7026 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7028 offset = INTVAL (XEXP (y, 1));
7029 y = XEXP (y, 0);
7032 if (GET_CODE (y) == UNSPEC
7033 && (XINT (y, 1) == UNSPEC_GOTOFF
7034 || XINT (y, 1) == UNSPEC_PLTOFF))
7035 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7038 if (GET_CODE (x) != MEM)
7039 return orig_x;
7041 x = XEXP (x, 0);
7042 if (GET_CODE (x) == PLUS
7043 && GET_CODE (XEXP (x, 1)) == CONST
7044 && GET_CODE (XEXP (x, 0)) == REG
7045 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7047 y = XEXP (XEXP (x, 1), 0);
7048 if (GET_CODE (y) == UNSPEC
7049 && XINT (y, 1) == UNSPEC_GOT)
7050 y = XVECEXP (y, 0, 0);
7051 else
7052 return orig_x;
7054 else if (GET_CODE (x) == CONST)
7056 /* Extract the symbol ref from:
7057 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7058 UNSPEC_PLT/GOTENT))) */
7060 y = XEXP (x, 0);
7061 if (GET_CODE (y) == UNSPEC
7062 && (XINT (y, 1) == UNSPEC_GOTENT
7063 || XINT (y, 1) == UNSPEC_PLT))
7064 y = XVECEXP (y, 0, 0);
7065 else
7066 return orig_x;
7068 else
7069 return orig_x;
7071 if (GET_MODE (orig_x) != Pmode)
7073 if (GET_MODE (orig_x) == BLKmode)
7074 return orig_x;
7075 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7076 if (y == NULL_RTX)
7077 return orig_x;
7079 return y;
7082 /* Output operand OP to stdio stream FILE.
7083 OP is an address (register + offset) which is not used to address data;
7084 instead the rightmost bits are interpreted as the value. */
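/* Examples (illustrative): (plus (reg %r1) (const_int 5)) is printed as
   "5(%r1)", while a plain (const_int 3) is printed as "3".  */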
7086 static void
7087 print_addrstyle_operand (FILE *file, rtx op)
7089 HOST_WIDE_INT offset;
7090 rtx base;
7092 /* Extract base register and offset. */
7093 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7094 gcc_unreachable ();
7096 /* Sanity check. */
7097 if (base)
7099 gcc_assert (GET_CODE (base) == REG);
7100 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7101 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7104 /* Offsets are restricted to twelve bits. */
7105 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7106 if (base)
7107 fprintf (file, "(%s)", reg_names[REGNO (base)]);
7110 /* Assigns the number of NOP halfwords to be emitted before and after the
7111 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
7112 If hotpatching is disabled for the function, the values are set to zero.
7115 static void
7116 s390_function_num_hotpatch_hw (tree decl,
7117 int *hw_before,
7118 int *hw_after)
7120 tree attr;
7122 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7124 /* Handle the arguments of the hotpatch attribute. The values
7125 specified via attribute might override the cmdline argument
7126 values. */
7127 if (attr)
7129 tree args = TREE_VALUE (attr);
7131 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7132 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7134 else
7136 /* Use the values specified by the cmdline arguments. */
7137 *hw_before = s390_hotpatch_hw_before_label;
7138 *hw_after = s390_hotpatch_hw_after_label;
7142 /* Write the current .machine and .machinemode specification to the assembler
7143 file. */
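/* Illustrative output (editor's note), e.g. for -march=z13 -mzarch with
   the default facilities:
	.machinemode zarch
	.machine "z13"
   "+htm"/"+nohtm" and "+vx"/"+novx" modifiers are appended below whenever
   the selected facilities differ from the architecture default.  */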
7145 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7146 static void
7147 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7149 fprintf (asm_out_file, "\t.machinemode %s\n",
7150 (TARGET_ZARCH) ? "zarch" : "esa");
7151 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
7152 if (S390_USE_ARCHITECTURE_MODIFIERS)
7154 int cpu_flags;
7156 cpu_flags = processor_flags_table[(int) s390_arch];
7157 if (TARGET_HTM && !(cpu_flags & PF_TX))
7158 fprintf (asm_out_file, "+htm");
7159 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7160 fprintf (asm_out_file, "+nohtm");
7161 if (TARGET_VX && !(cpu_flags & PF_VX))
7162 fprintf (asm_out_file, "+vx");
7163 else if (!TARGET_VX && (cpu_flags & PF_VX))
7164 fprintf (asm_out_file, "+novx");
7166 fprintf (asm_out_file, "\"\n");
7169 /* Write an extra function header before the very start of the function. */
7171 void
7172 s390_asm_output_function_prefix (FILE *asm_out_file,
7173 const char *fnname ATTRIBUTE_UNUSED)
7175 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7176 return;
7177 /* Since only the function specific options are saved, and not an indication of
7178 which options were explicitly set, it's too much work here to figure out which options
7179 have actually changed. Thus, generate .machine and .machinemode whenever a
7180 function has the target attribute or pragma. */
7181 fprintf (asm_out_file, "\t.machinemode push\n");
7182 fprintf (asm_out_file, "\t.machine push\n");
7183 s390_asm_output_machine_for_arch (asm_out_file);
7186 /* Write an extra function footer after the very end of the function. */
7188 void
7189 s390_asm_declare_function_size (FILE *asm_out_file,
7190 const char *fnname, tree decl)
7192 if (!flag_inhibit_size_directive)
7193 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7194 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7195 return;
7196 fprintf (asm_out_file, "\t.machine pop\n");
7197 fprintf (asm_out_file, "\t.machinemode pop\n");
7199 #endif
7201 /* Write the extra assembler code needed to declare a function properly. */
7203 void
7204 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7205 tree decl)
7207 int hw_before, hw_after;
7209 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7210 if (hw_before > 0)
7212 unsigned int function_alignment;
7213 int i;
7215 /* Add a trampoline code area before the function label and initialize it
7216 with two-byte nop instructions. This area can be overwritten with code
7217 that jumps to a patched version of the function. */
7218 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7219 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7220 hw_before);
7221 for (i = 1; i < hw_before; i++)
7222 fputs ("\tnopr\t%r0\n", asm_out_file);
7224 /* Note: The function label must be aligned so that (a) the bytes of the
7225 following nop do not cross a cacheline boundary, and (b) a jump address
7226 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7227 stored directly before the label without crossing a cacheline
7228 boundary. All this is necessary to make sure the trampoline code can
7229 be changed atomically.
7230 This alignment is done automatically using the FUNCTION_BOUNDARY, but
7231 if there are NOPs before the function label, the alignment is placed
7232 before them. So it is necessary to duplicate the alignment after the
7233 NOPs. */
7234 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7235 if (! DECL_USER_ALIGN (decl))
7236 function_alignment = MAX (function_alignment,
7237 (unsigned int) align_functions);
7238 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7239 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7242 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7244 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7245 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7246 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7247 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7248 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7249 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7250 s390_warn_framesize);
7251 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7252 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7253 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7254 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7255 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7256 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7257 TARGET_PACKED_STACK);
7258 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7259 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7260 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7261 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7262 s390_warn_dynamicstack_p);
7264 ASM_OUTPUT_LABEL (asm_out_file, fname);
7265 if (hw_after > 0)
7266 asm_fprintf (asm_out_file,
7267 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7268 hw_after);
7271 /* Output machine-dependent UNSPECs occurring in address constant X
7272 in assembler syntax to stdio stream FILE. Returns true if the
7273 constant X could be recognized, false otherwise. */
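/* Example (illustrative): (unspec [(symbol_ref "foo")] UNSPEC_GOTENT) is
   printed as "foo@GOTENT".  */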
7275 static bool
7276 s390_output_addr_const_extra (FILE *file, rtx x)
7278 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7279 switch (XINT (x, 1))
7281 case UNSPEC_GOTENT:
7282 output_addr_const (file, XVECEXP (x, 0, 0));
7283 fprintf (file, "@GOTENT");
7284 return true;
7285 case UNSPEC_GOT:
7286 output_addr_const (file, XVECEXP (x, 0, 0));
7287 fprintf (file, "@GOT");
7288 return true;
7289 case UNSPEC_GOTOFF:
7290 output_addr_const (file, XVECEXP (x, 0, 0));
7291 fprintf (file, "@GOTOFF");
7292 return true;
7293 case UNSPEC_PLT:
7294 output_addr_const (file, XVECEXP (x, 0, 0));
7295 fprintf (file, "@PLT");
7296 return true;
7297 case UNSPEC_PLTOFF:
7298 output_addr_const (file, XVECEXP (x, 0, 0));
7299 fprintf (file, "@PLTOFF");
7300 return true;
7301 case UNSPEC_TLSGD:
7302 output_addr_const (file, XVECEXP (x, 0, 0));
7303 fprintf (file, "@TLSGD");
7304 return true;
7305 case UNSPEC_TLSLDM:
7306 assemble_name (file, get_some_local_dynamic_name ());
7307 fprintf (file, "@TLSLDM");
7308 return true;
7309 case UNSPEC_DTPOFF:
7310 output_addr_const (file, XVECEXP (x, 0, 0));
7311 fprintf (file, "@DTPOFF");
7312 return true;
7313 case UNSPEC_NTPOFF:
7314 output_addr_const (file, XVECEXP (x, 0, 0));
7315 fprintf (file, "@NTPOFF");
7316 return true;
7317 case UNSPEC_GOTNTPOFF:
7318 output_addr_const (file, XVECEXP (x, 0, 0));
7319 fprintf (file, "@GOTNTPOFF");
7320 return true;
7321 case UNSPEC_INDNTPOFF:
7322 output_addr_const (file, XVECEXP (x, 0, 0));
7323 fprintf (file, "@INDNTPOFF");
7324 return true;
7327 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7328 switch (XINT (x, 1))
7330 case UNSPEC_POOL_OFFSET:
7331 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7332 output_addr_const (file, x);
7333 return true;
7335 return false;
7338 /* Output address operand ADDR in assembler syntax to
7339 stdio stream FILE. */
7341 void
7342 print_operand_address (FILE *file, rtx addr)
7344 struct s390_address ad;
7345 memset (&ad, 0, sizeof (s390_address));
7347 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7349 if (!TARGET_Z10)
7351 output_operand_lossage ("symbolic memory references are "
7352 "only supported on z10 or later");
7353 return;
7355 output_addr_const (file, addr);
7356 return;
7359 if (!s390_decompose_address (addr, &ad)
7360 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7361 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7362 output_operand_lossage ("cannot decompose address");
7364 if (ad.disp)
7365 output_addr_const (file, ad.disp);
7366 else
7367 fprintf (file, "0");
7369 if (ad.base && ad.indx)
7370 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7371 reg_names[REGNO (ad.base)]);
7372 else if (ad.base)
7373 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7376 /* Output operand X in assembler syntax to stdio stream FILE.
7377 CODE specified the format flag. The following format flags
7378 are recognized:
7380 'C': print opcode suffix for branch condition.
7381 'D': print opcode suffix for inverse branch condition.
7382 'E': print opcode suffix for branch on index instruction.
7383 'G': print the size of the operand in bytes.
7384 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7385 'M': print the second word of a TImode operand.
7386 'N': print the second word of a DImode operand.
7387 'O': print only the displacement of a memory reference or address.
7388 'R': print only the base register of a memory reference or address.
7389 'S': print S-type memory reference (base+displacement).
7390 'Y': print address style operand without index (e.g. shift count or setmem
7391 operand).
7393 'b': print integer X as if it's an unsigned byte.
7394 'c': print integer X as if it's a signed byte.
7395 'e': "end" of contiguous bitmask X in either DImode or vector inner mode.
7396 'f': "end" of contiguous bitmask X in SImode.
7397 'h': print integer X as if it's a signed halfword.
7398 'i': print the first nonzero HImode part of X.
7399 'j': print the first HImode part unequal to -1 of X.
7400 'k': print the first nonzero SImode part of X.
7401 'm': print the first SImode part unequal to -1 of X.
7402 'o': print integer X as if it's an unsigned 32bit word.
7403 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7404 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7405 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7406 'x': print integer X as if it's an unsigned halfword.
7407 'v': print register number as vector register (v1 instead of f1).
7410 void
7411 print_operand (FILE *file, rtx x, int code)
7413 HOST_WIDE_INT ival;
7415 switch (code)
7417 case 'C':
7418 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7419 return;
7421 case 'D':
7422 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7423 return;
7425 case 'E':
7426 if (GET_CODE (x) == LE)
7427 fprintf (file, "l");
7428 else if (GET_CODE (x) == GT)
7429 fprintf (file, "h");
7430 else
7431 output_operand_lossage ("invalid comparison operator "
7432 "for 'E' output modifier");
7433 return;
7435 case 'J':
7436 if (GET_CODE (x) == SYMBOL_REF)
7438 fprintf (file, "%s", ":tls_load:");
7439 output_addr_const (file, x);
7441 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7443 fprintf (file, "%s", ":tls_gdcall:");
7444 output_addr_const (file, XVECEXP (x, 0, 0));
7446 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7448 fprintf (file, "%s", ":tls_ldcall:");
7449 const char *name = get_some_local_dynamic_name ();
7450 gcc_assert (name);
7451 assemble_name (file, name);
7453 else
7454 output_operand_lossage ("invalid reference for 'J' output modifier");
7455 return;
7457 case 'G':
7458 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7459 return;
7461 case 'O':
7463 struct s390_address ad;
7464 int ret;
7466 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7468 if (!ret
7469 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7470 || ad.indx)
7472 output_operand_lossage ("invalid address for 'O' output modifier");
7473 return;
7476 if (ad.disp)
7477 output_addr_const (file, ad.disp);
7478 else
7479 fprintf (file, "0");
7481 return;
7483 case 'R':
7485 struct s390_address ad;
7486 int ret;
7488 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7490 if (!ret
7491 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7492 || ad.indx)
7494 output_operand_lossage ("invalid address for 'R' output modifier");
7495 return;
7498 if (ad.base)
7499 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7500 else
7501 fprintf (file, "0");
7503 return;
7505 case 'S':
7507 struct s390_address ad;
7508 int ret;
7510 if (!MEM_P (x))
7512 output_operand_lossage ("memory reference expected for "
7513 "'S' output modifier");
7514 return;
7516 ret = s390_decompose_address (XEXP (x, 0), &ad);
7518 if (!ret
7519 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7520 || ad.indx)
7522 output_operand_lossage ("invalid address for 'S' output modifier");
7523 return;
7526 if (ad.disp)
7527 output_addr_const (file, ad.disp);
7528 else
7529 fprintf (file, "0");
7531 if (ad.base)
7532 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7534 return;
7536 case 'N':
7537 if (GET_CODE (x) == REG)
7538 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7539 else if (GET_CODE (x) == MEM)
7540 x = change_address (x, VOIDmode,
7541 plus_constant (Pmode, XEXP (x, 0), 4));
7542 else
7543 output_operand_lossage ("register or memory expression expected "
7544 "for 'N' output modifier");
7545 break;
7547 case 'M':
7548 if (GET_CODE (x) == REG)
7549 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7550 else if (GET_CODE (x) == MEM)
7551 x = change_address (x, VOIDmode,
7552 plus_constant (Pmode, XEXP (x, 0), 8));
7553 else
7554 output_operand_lossage ("register or memory expression expected "
7555 "for 'M' output modifier");
7556 break;
7558 case 'Y':
7559 print_addrstyle_operand (file, x);
7560 return;
7563 switch (GET_CODE (x))
7565 case REG:
7566 /* Print FP regs as fx instead of vx when they are accessed
7567 through non-vector mode. */
7568 if (code == 'v'
7569 || VECTOR_NOFP_REG_P (x)
7570 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7571 || (VECTOR_REG_P (x)
7572 && (GET_MODE_SIZE (GET_MODE (x)) /
7573 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7574 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7575 else
7576 fprintf (file, "%s", reg_names[REGNO (x)]);
7577 break;
7579 case MEM:
7580 output_address (GET_MODE (x), XEXP (x, 0));
7581 break;
7583 case CONST:
7584 case CODE_LABEL:
7585 case LABEL_REF:
7586 case SYMBOL_REF:
7587 output_addr_const (file, x);
7588 break;
7590 case CONST_INT:
7591 ival = INTVAL (x);
7592 switch (code)
7594 case 0:
7595 break;
7596 case 'b':
7597 ival &= 0xff;
7598 break;
7599 case 'c':
7600 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7601 break;
7602 case 'x':
7603 ival &= 0xffff;
7604 break;
7605 case 'h':
7606 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7607 break;
7608 case 'i':
7609 ival = s390_extract_part (x, HImode, 0);
7610 break;
7611 case 'j':
7612 ival = s390_extract_part (x, HImode, -1);
7613 break;
7614 case 'k':
7615 ival = s390_extract_part (x, SImode, 0);
7616 break;
7617 case 'm':
7618 ival = s390_extract_part (x, SImode, -1);
7619 break;
7620 case 'o':
7621 ival &= 0xffffffff;
7622 break;
7623 case 'e': case 'f':
7624 case 's': case 't':
7626 int start, end;
7627 int len;
7628 bool ok;
7630 len = (code == 's' || code == 'e' ? 64 : 32);
7631 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7632 gcc_assert (ok);
7633 if (code == 's' || code == 't')
7634 ival = start;
7635 else
7636 ival = end;
7638 break;
7639 default:
7640 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7642 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7643 break;
7645 case CONST_WIDE_INT:
7646 if (code == 'b')
7647 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7648 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7649 else if (code == 'x')
7650 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7651 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7652 else if (code == 'h')
7653 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7654 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7655 else
7657 if (code == 0)
7658 output_operand_lossage ("invalid constant - try using "
7659 "an output modifier");
7660 else
7661 output_operand_lossage ("invalid constant for output modifier '%c'",
7662 code);
7664 break;
7665 case CONST_VECTOR:
7666 switch (code)
7668 case 'h':
7669 gcc_assert (const_vec_duplicate_p (x));
7670 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7671 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7672 break;
7673 case 'e':
7674 case 's':
7676 int start, end;
7677 bool ok;
7679 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7680 gcc_assert (ok);
7681 ival = (code == 's') ? start : end;
7682 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7684 break;
7685 case 't':
7687 unsigned mask;
7688 bool ok = s390_bytemask_vector_p (x, &mask);
7689 gcc_assert (ok);
7690 fprintf (file, "%u", mask);
7692 break;
7694 default:
7695 output_operand_lossage ("invalid constant vector for output "
7696 "modifier '%c'", code);
7698 break;
7700 default:
7701 if (code == 0)
7702 output_operand_lossage ("invalid expression - try using "
7703 "an output modifier");
7704 else
7705 output_operand_lossage ("invalid expression for output "
7706 "modifier '%c'", code);
7707 break;
7711 /* Target hook for assembling integer objects. We need to define it
7712 here to work around a bug in some versions of GAS, which couldn't
7713 handle values smaller than INT_MIN when printed in decimal. */
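/* Example (illustrative): an aligned 8-byte CONST_INT of value
   0x8000000000000000 (smaller than INT_MIN when read as signed) is
   emitted as "\t.quad\t0x8000000000000000", i.e. in hex rather than in
   decimal.  */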
7715 static bool
7716 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7718 if (size == 8 && aligned_p
7719 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7721 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7722 INTVAL (x));
7723 return true;
7725 return default_assemble_integer (x, size, aligned_p);
7728 /* Returns true if register REGNO is used for forming
7729 a memory address in expression X. */
7731 static bool
7732 reg_used_in_mem_p (int regno, rtx x)
7734 enum rtx_code code = GET_CODE (x);
7735 int i, j;
7736 const char *fmt;
7738 if (code == MEM)
7740 if (refers_to_regno_p (regno, XEXP (x, 0)))
7741 return true;
7743 else if (code == SET
7744 && GET_CODE (SET_DEST (x)) == PC)
7746 if (refers_to_regno_p (regno, SET_SRC (x)))
7747 return true;
7750 fmt = GET_RTX_FORMAT (code);
7751 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7753 if (fmt[i] == 'e'
7754 && reg_used_in_mem_p (regno, XEXP (x, i)))
7755 return true;
7757 else if (fmt[i] == 'E')
7758 for (j = 0; j < XVECLEN (x, i); j++)
7759 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7760 return true;
7762 return false;
7765 /* Returns true if expression DEP_RTX sets an address register
7766 used by instruction INSN to address memory. */
7768 static bool
7769 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7771 rtx target, pat;
7773 if (NONJUMP_INSN_P (dep_rtx))
7774 dep_rtx = PATTERN (dep_rtx);
7776 if (GET_CODE (dep_rtx) == SET)
7778 target = SET_DEST (dep_rtx);
7779 if (GET_CODE (target) == STRICT_LOW_PART)
7780 target = XEXP (target, 0);
7781 while (GET_CODE (target) == SUBREG)
7782 target = SUBREG_REG (target);
7784 if (GET_CODE (target) == REG)
7786 int regno = REGNO (target);
7788 if (s390_safe_attr_type (insn) == TYPE_LA)
7790 pat = PATTERN (insn);
7791 if (GET_CODE (pat) == PARALLEL)
7793 gcc_assert (XVECLEN (pat, 0) == 2);
7794 pat = XVECEXP (pat, 0, 0);
7796 gcc_assert (GET_CODE (pat) == SET);
7797 return refers_to_regno_p (regno, SET_SRC (pat));
7799 else if (get_attr_atype (insn) == ATYPE_AGEN)
7800 return reg_used_in_mem_p (regno, PATTERN (insn));
7803 return false;
7806 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
7809 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7811 rtx dep_rtx = PATTERN (dep_insn);
7812 int i;
7814 if (GET_CODE (dep_rtx) == SET
7815 && addr_generation_dependency_p (dep_rtx, insn))
7816 return 1;
7817 else if (GET_CODE (dep_rtx) == PARALLEL)
7819 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7821 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7822 return 1;
7825 return 0;
7829 /* A C statement (sans semicolon) to update the integer scheduling priority
7830 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7831 reduce the priority to execute INSN later. Do not define this macro if
7832 you do not need to adjust the scheduling priorities of insns.
7834 A STD instruction should be scheduled earlier,
7835 in order to use the bypass. */
7836 static int
7837 s390_adjust_priority (rtx_insn *insn, int priority)
7839 if (! INSN_P (insn))
7840 return priority;
7842 if (s390_tune <= PROCESSOR_2064_Z900)
7843 return priority;
7845 switch (s390_safe_attr_type (insn))
7847 case TYPE_FSTOREDF:
7848 case TYPE_FSTORESF:
7849 priority = priority << 3;
7850 break;
7851 case TYPE_STORE:
7852 case TYPE_STM:
7853 priority = priority << 1;
7854 break;
7855 default:
7856 break;
7858 return priority;
7862 /* The number of instructions that can be issued per cycle. */
7864 static int
7865 s390_issue_rate (void)
7867 switch (s390_tune)
7869 case PROCESSOR_2084_Z990:
7870 case PROCESSOR_2094_Z9_109:
7871 case PROCESSOR_2094_Z9_EC:
7872 case PROCESSOR_2817_Z196:
7873 return 3;
7874 case PROCESSOR_2097_Z10:
7875 return 2;
7876 case PROCESSOR_9672_G5:
7877 case PROCESSOR_9672_G6:
7878 case PROCESSOR_2064_Z900:
7879 /* Starting with EC12 we use the sched_reorder hook to take care
7880 of instruction dispatch constraints. The algorithm only
7881 picks the best instruction and assumes only a single
7882 instruction gets issued per cycle. */
7883 case PROCESSOR_2827_ZEC12:
7884 case PROCESSOR_2964_Z13:
7885 default:
7886 return 1;
7890 static int
7891 s390_first_cycle_multipass_dfa_lookahead (void)
7893 return 4;
7896 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7897 Fix up MEMs as required. */
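/* Schematic example (illustrative) of the rewrite performed below:
     (mem (symbol_ref LC0))          ; literal pool reference
   becomes
     (mem (unspec [(symbol_ref LC0) (reg <base>)] UNSPEC_LTREF))
   making the dependence on the literal pool base register explicit.  */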
7899 static void
7900 annotate_constant_pool_refs (rtx *x)
7902 int i, j;
7903 const char *fmt;
7905 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7906 || !CONSTANT_POOL_ADDRESS_P (*x));
7908 /* Literal pool references can only occur inside a MEM ... */
7909 if (GET_CODE (*x) == MEM)
7911 rtx memref = XEXP (*x, 0);
7913 if (GET_CODE (memref) == SYMBOL_REF
7914 && CONSTANT_POOL_ADDRESS_P (memref))
7916 rtx base = cfun->machine->base_reg;
7917 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7918 UNSPEC_LTREF);
7920 *x = replace_equiv_address (*x, addr);
7921 return;
7924 if (GET_CODE (memref) == CONST
7925 && GET_CODE (XEXP (memref, 0)) == PLUS
7926 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7927 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7928 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7930 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7931 rtx sym = XEXP (XEXP (memref, 0), 0);
7932 rtx base = cfun->machine->base_reg;
7933 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7934 UNSPEC_LTREF);
7936 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7937 return;
7941 /* ... or a load-address type pattern. */
7942 if (GET_CODE (*x) == SET)
7944 rtx addrref = SET_SRC (*x);
7946 if (GET_CODE (addrref) == SYMBOL_REF
7947 && CONSTANT_POOL_ADDRESS_P (addrref))
7949 rtx base = cfun->machine->base_reg;
7950 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7951 UNSPEC_LTREF);
7953 SET_SRC (*x) = addr;
7954 return;
7957 if (GET_CODE (addrref) == CONST
7958 && GET_CODE (XEXP (addrref, 0)) == PLUS
7959 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7960 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7961 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7963 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7964 rtx sym = XEXP (XEXP (addrref, 0), 0);
7965 rtx base = cfun->machine->base_reg;
7966 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7967 UNSPEC_LTREF);
7969 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7970 return;
7974 /* Annotate LTREL_BASE as well. */
7975 if (GET_CODE (*x) == UNSPEC
7976 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7978 rtx base = cfun->machine->base_reg;
7979 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7980 UNSPEC_LTREL_BASE);
7981 return;
7984 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7985 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7987 if (fmt[i] == 'e')
7989 annotate_constant_pool_refs (&XEXP (*x, i));
7991 else if (fmt[i] == 'E')
7993 for (j = 0; j < XVECLEN (*x, i); j++)
7994 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7999 /* Split all branches that exceed the maximum distance.
8000 Returns true if this created a new literal pool entry. */
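/* In rough terms: an out-of-range branch is rewritten into an indirect
   branch through the return register.  The target address is loaded
   from the literal pool -- directly for non-PIC code, or via an
   UNSPEC_LTREL_OFFSET pool entry combined with UNSPEC_LTREL_BASE for
   PIC code -- which is why this can create a new pool entry.  */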
8002 static int
8003 s390_split_branches (void)
8005 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8006 int new_literal = 0, ret;
8007 rtx_insn *insn;
8008 rtx pat, target;
8009 rtx *label;
8011 /* We need correct insn addresses. */
8013 shorten_branches (get_insns ());
8015 /* Find all branches that exceed 64KB, and split them. */
8017 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8019 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8020 continue;
8022 pat = PATTERN (insn);
8023 if (GET_CODE (pat) == PARALLEL)
8024 pat = XVECEXP (pat, 0, 0);
8025 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8026 continue;
8028 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8030 label = &SET_SRC (pat);
8032 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8034 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8035 label = &XEXP (SET_SRC (pat), 1);
8036 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8037 label = &XEXP (SET_SRC (pat), 2);
8038 else
8039 continue;
8041 else
8042 continue;
8044 if (get_attr_length (insn) <= 4)
8045 continue;
8047 /* We are going to use the return register as a scratch register;
8048 make sure it will be saved/restored by the prologue/epilogue. */
8049 cfun_frame_layout.save_return_addr_p = 1;
8051 if (!flag_pic)
8053 new_literal = 1;
8054 rtx mem = force_const_mem (Pmode, *label);
8055 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8056 insn);
8057 INSN_ADDRESSES_NEW (set_insn, -1);
8058 annotate_constant_pool_refs (&PATTERN (set_insn));
8060 target = temp_reg;
8062 else
8064 new_literal = 1;
8065 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8066 UNSPEC_LTREL_OFFSET);
8067 target = gen_rtx_CONST (Pmode, target);
8068 target = force_const_mem (Pmode, target);
8069 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8070 insn);
8071 INSN_ADDRESSES_NEW (set_insn, -1);
8072 annotate_constant_pool_refs (&PATTERN (set_insn));
8074 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8075 cfun->machine->base_reg),
8076 UNSPEC_LTREL_BASE);
8077 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8080 ret = validate_change (insn, label, target, 0);
8081 gcc_assert (ret);
8084 return new_literal;
8088 /* Find an annotated literal pool symbol referenced in RTX X,
8089 and store it at REF. Will abort if X contains references to
8090 more than one such pool symbol; multiple references to the same
8091 symbol are allowed, however.
8093 The rtx pointed to by REF must be initialized to NULL_RTX
8094 by the caller before calling this routine. */
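/* Note that only pool symbols already wrapped in UNSPEC_LTREF by
   annotate_constant_pool_refs are matched here; a bare SYMBOL_REF with
   CONSTANT_POOL_ADDRESS_P set would trip the assertion below.  */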
8096 static void
8097 find_constant_pool_ref (rtx x, rtx *ref)
8099 int i, j;
8100 const char *fmt;
8102 /* Ignore LTREL_BASE references. */
8103 if (GET_CODE (x) == UNSPEC
8104 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8105 return;
8106 /* Likewise POOL_ENTRY insns. */
8107 if (GET_CODE (x) == UNSPEC_VOLATILE
8108 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8109 return;
8111 gcc_assert (GET_CODE (x) != SYMBOL_REF
8112 || !CONSTANT_POOL_ADDRESS_P (x));
8114 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8116 rtx sym = XVECEXP (x, 0, 0);
8117 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8118 && CONSTANT_POOL_ADDRESS_P (sym));
8120 if (*ref == NULL_RTX)
8121 *ref = sym;
8122 else
8123 gcc_assert (*ref == sym);
8125 return;
8128 fmt = GET_RTX_FORMAT (GET_CODE (x));
8129 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8131 if (fmt[i] == 'e')
8133 find_constant_pool_ref (XEXP (x, i), ref);
8135 else if (fmt[i] == 'E')
8137 for (j = 0; j < XVECLEN (x, i); j++)
8138 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8143 /* Replace every reference to the annotated literal pool
8144 symbol REF in X by its base plus OFFSET. */
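/* As a sketch (not verbatim RTL), the two shapes handled below are
     (unspec [(ref) (base)] UNSPEC_LTREF)
       -> (plus (base) (offset))
     (plus (unspec [(ref) (base)] UNSPEC_LTREF) (const_int N))
       -> (plus (plus (base) (offset)) (const_int N))
   with OFFSET being the pool-relative offset supplied by the caller.  */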
8146 static void
8147 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8149 int i, j;
8150 const char *fmt;
8152 gcc_assert (*x != ref);
8154 if (GET_CODE (*x) == UNSPEC
8155 && XINT (*x, 1) == UNSPEC_LTREF
8156 && XVECEXP (*x, 0, 0) == ref)
8158 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8159 return;
8162 if (GET_CODE (*x) == PLUS
8163 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8164 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8165 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8166 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8168 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8169 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8170 return;
8173 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8174 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8176 if (fmt[i] == 'e')
8178 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8180 else if (fmt[i] == 'E')
8182 for (j = 0; j < XVECLEN (*x, i); j++)
8183 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8188 /* Check whether X contains an UNSPEC_LTREL_BASE.
8189 Return its constant pool symbol if found, NULL_RTX otherwise. */
8191 static rtx
8192 find_ltrel_base (rtx x)
8194 int i, j;
8195 const char *fmt;
8197 if (GET_CODE (x) == UNSPEC
8198 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8199 return XVECEXP (x, 0, 0);
8201 fmt = GET_RTX_FORMAT (GET_CODE (x));
8202 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8204 if (fmt[i] == 'e')
8206 rtx fnd = find_ltrel_base (XEXP (x, i));
8207 if (fnd)
8208 return fnd;
8210 else if (fmt[i] == 'E')
8212 for (j = 0; j < XVECLEN (x, i); j++)
8214 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8215 if (fnd)
8216 return fnd;
8221 return NULL_RTX;
8224 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8226 static void
8227 replace_ltrel_base (rtx *x)
8229 int i, j;
8230 const char *fmt;
8232 if (GET_CODE (*x) == UNSPEC
8233 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8235 *x = XVECEXP (*x, 0, 1);
8236 return;
8239 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8240 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8242 if (fmt[i] == 'e')
8244 replace_ltrel_base (&XEXP (*x, i));
8246 else if (fmt[i] == 'E')
8248 for (j = 0; j < XVECLEN (*x, i); j++)
8249 replace_ltrel_base (&XVECEXP (*x, i, j));
8255 /* We keep a list of constants which we have to add to internal
8256 constant tables in the middle of large functions. */
8258 #define NR_C_MODES 32
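/* The modes below are listed from largest to smallest; s390_dump_pool
   relies on this order to emit pool entries in descending alignment
   requirement order so that every constant stays naturally aligned.  */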
8259 machine_mode constant_modes[NR_C_MODES] =
8261 TFmode, TImode, TDmode,
8262 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8263 V4SFmode, V2DFmode, V1TFmode,
8264 DFmode, DImode, DDmode,
8265 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8266 SFmode, SImode, SDmode,
8267 V4QImode, V2HImode, V1SImode, V1SFmode,
8268 HImode,
8269 V2QImode, V1HImode,
8270 QImode,
8271 V1QImode
8274 struct constant
8276 struct constant *next;
8277 rtx value;
8278 rtx_code_label *label;
8281 struct constant_pool
8283 struct constant_pool *next; /* Next pool in the chain. */
8284 rtx_insn *first_insn; /* First insn covered by this pool. */
8285 rtx_insn *pool_insn; /* Placeholder insn representing the pool. */
8286 bitmap insns; /* UIDs of the insns covered by this pool. */
8287 rtx_insn *emit_pool_after; /* Insn after which the pool is to be emitted. */
8289 struct constant *constants[NR_C_MODES]; /* Per-mode constant lists. */
8290 struct constant *execute; /* Execute target templates. */
8291 rtx_code_label *label; /* Pool base label. */
8292 int size; /* Total size of the pool in bytes. */
8295 /* Allocate new constant_pool structure. */
8297 static struct constant_pool *
8298 s390_alloc_pool (void)
8300 struct constant_pool *pool;
8301 int i;
8303 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8304 pool->next = NULL;
8305 for (i = 0; i < NR_C_MODES; i++)
8306 pool->constants[i] = NULL;
8308 pool->execute = NULL;
8309 pool->label = gen_label_rtx ();
8310 pool->first_insn = NULL;
8311 pool->pool_insn = NULL;
8312 pool->insns = BITMAP_ALLOC (NULL);
8313 pool->size = 0;
8314 pool->emit_pool_after = NULL;
8316 return pool;
8319 /* Create new constant pool covering instructions starting at INSN
8320 and chain it to the end of POOL_LIST. */
8322 static struct constant_pool *
8323 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8325 struct constant_pool *pool, **prev;
8327 pool = s390_alloc_pool ();
8328 pool->first_insn = insn;
8330 for (prev = pool_list; *prev; prev = &(*prev)->next)
8332 *prev = pool;
8334 return pool;
8337 /* End range of instructions covered by POOL at INSN and emit
8338 a placeholder insn representing the pool. */
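/* The placeholder carries the pool size (padded by 8 bytes of alignment
   slop) so that subsequent insn address calculations can account for
   the space the pool will eventually occupy.  */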
8340 static void
8341 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8343 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8345 if (!insn)
8346 insn = get_last_insn ();
8348 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8349 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8352 /* Add INSN to the list of insns covered by POOL. */
8354 static void
8355 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8357 bitmap_set_bit (pool->insns, INSN_UID (insn));
8360 /* Return pool out of POOL_LIST that covers INSN. */
8362 static struct constant_pool *
8363 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8365 struct constant_pool *pool;
8367 for (pool = pool_list; pool; pool = pool->next)
8368 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8369 break;
8371 return pool;
8374 /* Add constant VAL of mode MODE to the constant pool POOL. */
8376 static void
8377 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8379 struct constant *c;
8380 int i;
8382 for (i = 0; i < NR_C_MODES; i++)
8383 if (constant_modes[i] == mode)
8384 break;
8385 gcc_assert (i != NR_C_MODES);
8387 for (c = pool->constants[i]; c != NULL; c = c->next)
8388 if (rtx_equal_p (val, c->value))
8389 break;
8391 if (c == NULL)
8393 c = (struct constant *) xmalloc (sizeof *c);
8394 c->value = val;
8395 c->label = gen_label_rtx ();
8396 c->next = pool->constants[i];
8397 pool->constants[i] = c;
8398 pool->size += GET_MODE_SIZE (mode);
8402 /* Return an rtx that represents the offset of X from the start of
8403 pool POOL. */
8405 static rtx
8406 s390_pool_offset (struct constant_pool *pool, rtx x)
8408 rtx label;
8410 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8411 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8412 UNSPEC_POOL_OFFSET);
8413 return gen_rtx_CONST (GET_MODE (x), x);
8416 /* Find constant VAL of mode MODE in the constant pool POOL.
8417 Return an RTX describing the distance from the start of
8418 the pool to the location of the new constant. */
8420 static rtx
8421 s390_find_constant (struct constant_pool *pool, rtx val,
8422 machine_mode mode)
8424 struct constant *c;
8425 int i;
8427 for (i = 0; i < NR_C_MODES; i++)
8428 if (constant_modes[i] == mode)
8429 break;
8430 gcc_assert (i != NR_C_MODES);
8432 for (c = pool->constants[i]; c != NULL; c = c->next)
8433 if (rtx_equal_p (val, c->value))
8434 break;
8436 gcc_assert (c);
8438 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8441 /* Check whether INSN is an execute. Return the label_ref to its
8442 execute target template if so, NULL_RTX otherwise. */
8444 static rtx
8445 s390_execute_label (rtx insn)
8447 if (NONJUMP_INSN_P (insn)
8448 && GET_CODE (PATTERN (insn)) == PARALLEL
8449 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8450 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8451 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8453 return NULL_RTX;
8456 /* Add execute target for INSN to the constant pool POOL. */
8458 static void
8459 s390_add_execute (struct constant_pool *pool, rtx insn)
8461 struct constant *c;
8463 for (c = pool->execute; c != NULL; c = c->next)
8464 if (INSN_UID (insn) == INSN_UID (c->value))
8465 break;
8467 if (c == NULL)
8469 c = (struct constant *) xmalloc (sizeof *c);
8470 c->value = insn;
8471 c->label = gen_label_rtx ();
8472 c->next = pool->execute;
8473 pool->execute = c;
8474 pool->size += 6;
8478 /* Find execute target for INSN in the constant pool POOL.
8479 Return an RTX describing the distance from the start of
8480 the pool to the location of the execute target. */
8482 static rtx
8483 s390_find_execute (struct constant_pool *pool, rtx insn)
8485 struct constant *c;
8487 for (c = pool->execute; c != NULL; c = c->next)
8488 if (INSN_UID (insn) == INSN_UID (c->value))
8489 break;
8491 gcc_assert (c);
8493 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8496 /* For an execute INSN, extract the execute target template. */
8498 static rtx
8499 s390_execute_target (rtx insn)
8501 rtx pattern = PATTERN (insn);
8502 gcc_assert (s390_execute_label (insn));
8504 if (XVECLEN (pattern, 0) == 2)
8506 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8508 else
8510 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8511 int i;
8513 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8514 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8516 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8519 return pattern;
8522 /* Indicate that INSN cannot be duplicated. This is the case for
8523 execute insns that carry a unique label. */
8525 static bool
8526 s390_cannot_copy_insn_p (rtx_insn *insn)
8528 rtx label = s390_execute_label (insn);
8529 return label && label != const0_rtx;
8532 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8533 do not emit the pool base label. */
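/* (REMOTE_LABEL is true when the caller has already emitted POOL->label
   elsewhere -- e.g. right after the base register setup -- so it must
   not be emitted again in front of the pool body.)  */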
8535 static void
8536 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8538 struct constant *c;
8539 rtx_insn *insn = pool->pool_insn;
8540 int i;
8542 /* Switch to rodata section. */
8543 if (TARGET_CPU_ZARCH)
8545 insn = emit_insn_after (gen_pool_section_start (), insn);
8546 INSN_ADDRESSES_NEW (insn, -1);
8549 /* Ensure minimum pool alignment. */
8550 if (TARGET_CPU_ZARCH)
8551 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8552 else
8553 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8554 INSN_ADDRESSES_NEW (insn, -1);
8556 /* Emit pool base label. */
8557 if (!remote_label)
8559 insn = emit_label_after (pool->label, insn);
8560 INSN_ADDRESSES_NEW (insn, -1);
8563 /* Dump constants in descending alignment requirement order,
8564 ensuring proper alignment for every constant. */
8565 for (i = 0; i < NR_C_MODES; i++)
8566 for (c = pool->constants[i]; c; c = c->next)
8568 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8569 rtx value = copy_rtx (c->value);
8570 if (GET_CODE (value) == CONST
8571 && GET_CODE (XEXP (value, 0)) == UNSPEC
8572 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8573 && XVECLEN (XEXP (value, 0), 0) == 1)
8574 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8576 insn = emit_label_after (c->label, insn);
8577 INSN_ADDRESSES_NEW (insn, -1);
8579 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8580 gen_rtvec (1, value),
8581 UNSPECV_POOL_ENTRY);
8582 insn = emit_insn_after (value, insn);
8583 INSN_ADDRESSES_NEW (insn, -1);
8586 /* Ensure minimum alignment for instructions. */
8587 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8588 INSN_ADDRESSES_NEW (insn, -1);
8590 /* Output in-pool execute template insns. */
8591 for (c = pool->execute; c; c = c->next)
8593 insn = emit_label_after (c->label, insn);
8594 INSN_ADDRESSES_NEW (insn, -1);
8596 insn = emit_insn_after (s390_execute_target (c->value), insn);
8597 INSN_ADDRESSES_NEW (insn, -1);
8600 /* Switch back to previous section. */
8601 if (TARGET_CPU_ZARCH)
8603 insn = emit_insn_after (gen_pool_section_end (), insn);
8604 INSN_ADDRESSES_NEW (insn, -1);
8607 insn = emit_barrier_after (insn);
8608 INSN_ADDRESSES_NEW (insn, -1);
8610 /* Remove placeholder insn. */
8611 remove_insn (pool->pool_insn);
8614 /* Free all memory used by POOL. */
8616 static void
8617 s390_free_pool (struct constant_pool *pool)
8619 struct constant *c, *next;
8620 int i;
8622 for (i = 0; i < NR_C_MODES; i++)
8623 for (c = pool->constants[i]; c; c = next)
8625 next = c->next;
8626 free (c);
8629 for (c = pool->execute; c; c = next)
8631 next = c->next;
8632 free (c);
8635 BITMAP_FREE (pool->insns);
8636 free (pool);
8640 /* Collect main literal pool. Return NULL on overflow. */
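/* "Overflow" here means the pool would reach 4096 bytes or more and
   thus could no longer be fully addressed through the 12-bit
   displacement of a single base register; the caller is then expected
   to fall back to the chunkified pool handling below.  */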
8642 static struct constant_pool *
8643 s390_mainpool_start (void)
8645 struct constant_pool *pool;
8646 rtx_insn *insn;
8648 pool = s390_alloc_pool ();
8650 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8652 if (NONJUMP_INSN_P (insn)
8653 && GET_CODE (PATTERN (insn)) == SET
8654 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8655 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8657 /* There might be two main_pool instructions if base_reg
8658 is call-clobbered; one for shrink-wrapped code and one
8659 for the rest. We want to keep the first. */
8660 if (pool->pool_insn)
8662 insn = PREV_INSN (insn);
8663 delete_insn (NEXT_INSN (insn));
8664 continue;
8666 pool->pool_insn = insn;
8669 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8671 s390_add_execute (pool, insn);
8673 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8675 rtx pool_ref = NULL_RTX;
8676 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8677 if (pool_ref)
8679 rtx constant = get_pool_constant (pool_ref);
8680 machine_mode mode = get_pool_mode (pool_ref);
8681 s390_add_constant (pool, constant, mode);
8685 /* If hot/cold partitioning is enabled we have to make sure that
8686 the literal pool is emitted in the same section where the
8687 initialization of the literal pool base pointer takes place.
8688 emit_pool_after is only used in the non-overflow case on
8689 non-zarch CPUs, where we can emit the literal pool at the end of the
8690 function body within the text section. */
8691 if (NOTE_P (insn)
8692 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8693 && !pool->emit_pool_after)
8694 pool->emit_pool_after = PREV_INSN (insn);
8697 gcc_assert (pool->pool_insn || pool->size == 0);
8699 if (pool->size >= 4096)
8701 /* We're going to chunkify the pool, so remove the main
8702 pool placeholder insn. */
8703 remove_insn (pool->pool_insn);
8705 s390_free_pool (pool);
8706 pool = NULL;
8709 /* If the function ends with the section where the literal pool
8710 should be emitted, set the marker to its end. */
8711 if (pool && !pool->emit_pool_after)
8712 pool->emit_pool_after = get_last_insn ();
8714 return pool;
8717 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8718 Modify the current function to output the pool constants as well as
8719 the pool register setup instruction. */
8721 static void
8722 s390_mainpool_finish (struct constant_pool *pool)
8724 rtx base_reg = cfun->machine->base_reg;
8726 /* If the pool is empty, we're done. */
8727 if (pool->size == 0)
8729 /* We don't actually need a base register after all. */
8730 cfun->machine->base_reg = NULL_RTX;
8732 if (pool->pool_insn)
8733 remove_insn (pool->pool_insn);
8734 s390_free_pool (pool);
8735 return;
8738 /* We need correct insn addresses. */
8739 shorten_branches (get_insns ());
8741 /* On zSeries, we use a LARL to load the pool register. The pool is
8742 located in the .rodata section, so we emit it after the function. */
8743 if (TARGET_CPU_ZARCH)
8745 rtx set = gen_main_base_64 (base_reg, pool->label);
8746 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8747 INSN_ADDRESSES_NEW (insn, -1);
8748 remove_insn (pool->pool_insn);
8750 insn = get_last_insn ();
8751 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8752 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8754 s390_dump_pool (pool, 0);
8757 /* On S/390, if the total size of the function's code plus literal pool
8758 does not exceed 4096 bytes, we use BASR to set up a function base
8759 pointer, and emit the literal pool at the end of the function. */
8760 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8761 + pool->size + 8 /* alignment slop */ < 4096)
8763 rtx set = gen_main_base_31_small (base_reg, pool->label);
8764 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8765 INSN_ADDRESSES_NEW (insn, -1);
8766 remove_insn (pool->pool_insn);
8768 insn = emit_label_after (pool->label, insn);
8769 INSN_ADDRESSES_NEW (insn, -1);
8771 /* emit_pool_after will be set by s390_mainpool_start to the
8772 last insn of the section where the literal pool should be
8773 emitted. */
8774 insn = pool->emit_pool_after;
8776 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8777 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8779 s390_dump_pool (pool, 1);
8782 /* Otherwise, we emit an inline literal pool and use BASR to branch
8783 over it, setting up the pool register at the same time. */
8784 else
8786 rtx_code_label *pool_end = gen_label_rtx ();
8788 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8789 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8790 JUMP_LABEL (insn) = pool_end;
8791 INSN_ADDRESSES_NEW (insn, -1);
8792 remove_insn (pool->pool_insn);
8794 insn = emit_label_after (pool->label, insn);
8795 INSN_ADDRESSES_NEW (insn, -1);
8797 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8798 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8800 insn = emit_label_after (pool_end, pool->pool_insn);
8801 INSN_ADDRESSES_NEW (insn, -1);
8803 s390_dump_pool (pool, 1);
8807 /* Replace all literal pool references. */
8809 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8811 if (INSN_P (insn))
8812 replace_ltrel_base (&PATTERN (insn));
8814 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8816 rtx addr, pool_ref = NULL_RTX;
8817 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8818 if (pool_ref)
8820 if (s390_execute_label (insn))
8821 addr = s390_find_execute (pool, insn);
8822 else
8823 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8824 get_pool_mode (pool_ref));
8826 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8827 INSN_CODE (insn) = -1;
8833 /* Free the pool. */
8834 s390_free_pool (pool);
8837 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8838 We have decided we cannot use this pool, so revert all changes
8839 to the current function that were done by s390_mainpool_start. */
8840 static void
8841 s390_mainpool_cancel (struct constant_pool *pool)
8843 /* We didn't actually change the instruction stream, so simply
8844 free the pool memory. */
8845 s390_free_pool (pool);
8849 /* Chunkify the literal pool. */
8851 #define S390_POOL_CHUNK_MIN 0xc00
8852 #define S390_POOL_CHUNK_MAX 0xe00
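/* These chunk limits are kept well below the 4 KB displacement range,
   presumably to leave headroom for alignment padding and for the base
   register reload insns inserted between chunks.  */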
8854 static struct constant_pool *
8855 s390_chunkify_start (void)
8857 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8858 int extra_size = 0;
8859 bitmap far_labels;
8860 rtx pending_ltrel = NULL_RTX;
8861 rtx_insn *insn;
8863 rtx (*gen_reload_base) (rtx, rtx) =
8864 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8867 /* We need correct insn addresses. */
8869 shorten_branches (get_insns ());
8871 /* Scan all insns and move literals to pool chunks. */
8873 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8875 bool section_switch_p = false;
8877 /* Check for pending LTREL_BASE. */
8878 if (INSN_P (insn))
8880 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8881 if (ltrel_base)
8883 gcc_assert (ltrel_base == pending_ltrel);
8884 pending_ltrel = NULL_RTX;
8888 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8890 if (!curr_pool)
8891 curr_pool = s390_start_pool (&pool_list, insn);
8893 s390_add_execute (curr_pool, insn);
8894 s390_add_pool_insn (curr_pool, insn);
8896 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8898 rtx pool_ref = NULL_RTX;
8899 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8900 if (pool_ref)
8902 rtx constant = get_pool_constant (pool_ref);
8903 machine_mode mode = get_pool_mode (pool_ref);
8905 if (!curr_pool)
8906 curr_pool = s390_start_pool (&pool_list, insn);
8908 s390_add_constant (curr_pool, constant, mode);
8909 s390_add_pool_insn (curr_pool, insn);
8911 /* Don't split the pool chunk between a LTREL_OFFSET load
8912 and the corresponding LTREL_BASE. */
8913 if (GET_CODE (constant) == CONST
8914 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8915 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8917 gcc_assert (!pending_ltrel);
8918 pending_ltrel = pool_ref;
8923 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8925 if (curr_pool)
8926 s390_add_pool_insn (curr_pool, insn);
8927 /* An LTREL_BASE must follow within the same basic block. */
8928 gcc_assert (!pending_ltrel);
8931 if (NOTE_P (insn))
8932 switch (NOTE_KIND (insn))
8934 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8935 section_switch_p = true;
8936 break;
8937 case NOTE_INSN_VAR_LOCATION:
8938 case NOTE_INSN_CALL_ARG_LOCATION:
8939 continue;
8940 default:
8941 break;
8944 if (!curr_pool
8945 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8946 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8947 continue;
8949 if (TARGET_CPU_ZARCH)
8951 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8952 continue;
8954 s390_end_pool (curr_pool, NULL);
8955 curr_pool = NULL;
8957 else
8959 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8960 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8961 + extra_size;
8963 /* We will later have to insert base register reload insns.
8964 Those will have an effect on code size, which we need to
8965 consider here. This calculation makes rather pessimistic
8966 worst-case assumptions. */
8967 if (LABEL_P (insn))
8968 extra_size += 6;
8970 if (chunk_size < S390_POOL_CHUNK_MIN
8971 && curr_pool->size < S390_POOL_CHUNK_MIN
8972 && !section_switch_p)
8973 continue;
8975 /* Pool chunks can only be inserted after BARRIERs ... */
8976 if (BARRIER_P (insn))
8978 s390_end_pool (curr_pool, insn);
8979 curr_pool = NULL;
8980 extra_size = 0;
8983 /* ... so if we don't find one in time, create one. */
8984 else if (chunk_size > S390_POOL_CHUNK_MAX
8985 || curr_pool->size > S390_POOL_CHUNK_MAX
8986 || section_switch_p)
8988 rtx_insn *label, *jump, *barrier, *next, *prev;
8990 if (!section_switch_p)
8992 /* We can insert the barrier only after a 'real' insn. */
8993 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8994 continue;
8995 if (get_attr_length (insn) == 0)
8996 continue;
8997 /* Don't separate LTREL_BASE from the corresponding
8998 LTREL_OFFSET load. */
8999 if (pending_ltrel)
9000 continue;
9001 next = insn;
9004 insn = next;
9005 next = NEXT_INSN (insn);
9007 while (next
9008 && NOTE_P (next)
9009 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
9010 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
9012 else
9014 gcc_assert (!pending_ltrel);
9016 /* The old pool has to end before the section switch
9017 note in order to make it part of the current
9018 section. */
9019 insn = PREV_INSN (insn);
9022 label = gen_label_rtx ();
9023 prev = insn;
9024 if (prev && NOTE_P (prev))
9025 prev = prev_nonnote_insn (prev);
9026 if (prev)
9027 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9028 INSN_LOCATION (prev));
9029 else
9030 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9031 barrier = emit_barrier_after (jump);
9032 insn = emit_label_after (label, barrier);
9033 JUMP_LABEL (jump) = label;
9034 LABEL_NUSES (label) = 1;
9036 INSN_ADDRESSES_NEW (jump, -1);
9037 INSN_ADDRESSES_NEW (barrier, -1);
9038 INSN_ADDRESSES_NEW (insn, -1);
9040 s390_end_pool (curr_pool, barrier);
9041 curr_pool = NULL;
9042 extra_size = 0;
9047 if (curr_pool)
9048 s390_end_pool (curr_pool, NULL);
9049 gcc_assert (!pending_ltrel);
9051 /* Find all labels that are branched into
9052 from an insn belonging to a different chunk. */
9054 far_labels = BITMAP_ALLOC (NULL);
9056 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9058 rtx_jump_table_data *table;
9060 /* Labels marked with LABEL_PRESERVE_P can be target
9061 of non-local jumps, so we have to mark them.
9062 The same holds for named labels.
9064 Don't do that, however, if it is the label before
9065 a jump table. */
9067 if (LABEL_P (insn)
9068 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9070 rtx_insn *vec_insn = NEXT_INSN (insn);
9071 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9072 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9074 /* Check potential targets in a table jump (casesi_jump). */
9075 else if (tablejump_p (insn, NULL, &table))
9077 rtx vec_pat = PATTERN (table);
9078 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9080 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9082 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9084 if (s390_find_pool (pool_list, label)
9085 != s390_find_pool (pool_list, insn))
9086 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9089 /* If we have a direct jump (conditional or unconditional),
9090 check all potential targets. */
9091 else if (JUMP_P (insn))
9093 rtx pat = PATTERN (insn);
9095 if (GET_CODE (pat) == PARALLEL)
9096 pat = XVECEXP (pat, 0, 0);
9098 if (GET_CODE (pat) == SET)
9100 rtx label = JUMP_LABEL (insn);
9101 if (label && !ANY_RETURN_P (label))
9103 if (s390_find_pool (pool_list, label)
9104 != s390_find_pool (pool_list, insn))
9105 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9111 /* Insert base register reload insns before every pool. */
9113 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9115 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9116 curr_pool->label);
9117 rtx_insn *insn = curr_pool->first_insn;
9118 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9121 /* Insert base register reload insns at every far label. */
9123 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9124 if (LABEL_P (insn)
9125 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9127 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9128 if (pool)
9130 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9131 pool->label);
9132 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9137 BITMAP_FREE (far_labels);
9140 /* Recompute insn addresses. */
9142 init_insn_lengths ();
9143 shorten_branches (get_insns ());
9145 return pool_list;
9148 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9149 After we have decided to use this list, finish implementing
9150 all changes to the current function as required. */
9152 static void
9153 s390_chunkify_finish (struct constant_pool *pool_list)
9155 struct constant_pool *curr_pool = NULL;
9156 rtx_insn *insn;
9159 /* Replace all literal pool references. */
9161 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9163 if (INSN_P (insn))
9164 replace_ltrel_base (&PATTERN (insn));
9166 curr_pool = s390_find_pool (pool_list, insn);
9167 if (!curr_pool)
9168 continue;
9170 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9172 rtx addr, pool_ref = NULL_RTX;
9173 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9174 if (pool_ref)
9176 if (s390_execute_label (insn))
9177 addr = s390_find_execute (curr_pool, insn);
9178 else
9179 addr = s390_find_constant (curr_pool,
9180 get_pool_constant (pool_ref),
9181 get_pool_mode (pool_ref));
9183 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9184 INSN_CODE (insn) = -1;
9189 /* Dump out all literal pools. */
9191 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9192 s390_dump_pool (curr_pool, 0);
9194 /* Free pool list. */
9196 while (pool_list)
9198 struct constant_pool *next = pool_list->next;
9199 s390_free_pool (pool_list);
9200 pool_list = next;
9204 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9205 We have decided we cannot use this list, so revert all changes
9206 to the current function that were done by s390_chunkify_start. */
9208 static void
9209 s390_chunkify_cancel (struct constant_pool *pool_list)
9211 struct constant_pool *curr_pool = NULL;
9212 rtx_insn *insn;
9214 /* Remove all pool placeholder insns. */
9216 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9218 /* Did we insert an extra barrier? Remove it. */
9219 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9220 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9221 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9223 if (jump && JUMP_P (jump)
9224 && barrier && BARRIER_P (barrier)
9225 && label && LABEL_P (label)
9226 && GET_CODE (PATTERN (jump)) == SET
9227 && SET_DEST (PATTERN (jump)) == pc_rtx
9228 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9229 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9231 remove_insn (jump);
9232 remove_insn (barrier);
9233 remove_insn (label);
9236 remove_insn (curr_pool->pool_insn);
9239 /* Remove all base register reload insns. */
9241 for (insn = get_insns (); insn; )
9243 rtx_insn *next_insn = NEXT_INSN (insn);
9245 if (NONJUMP_INSN_P (insn)
9246 && GET_CODE (PATTERN (insn)) == SET
9247 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9248 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9249 remove_insn (insn);
9251 insn = next_insn;
9254 /* Free pool list. */
9256 while (pool_list)
9258 struct constant_pool *next = pool_list->next;
9259 s390_free_pool (pool_list);
9260 pool_list = next;
9264 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9266 void
9267 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9269 switch (GET_MODE_CLASS (mode))
9271 case MODE_FLOAT:
9272 case MODE_DECIMAL_FLOAT:
9273 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9275 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
9276 break;
9278 case MODE_INT:
9279 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9280 mark_symbol_refs_as_used (exp);
9281 break;
9283 case MODE_VECTOR_INT:
9284 case MODE_VECTOR_FLOAT:
9286 int i;
9287 machine_mode inner_mode;
9288 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9290 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9291 for (i = 0; i < XVECLEN (exp, 0); i++)
9292 s390_output_pool_entry (XVECEXP (exp, 0, i),
9293 inner_mode,
9294 i == 0
9295 ? align
9296 : GET_MODE_BITSIZE (inner_mode));
9298 break;
9300 default:
9301 gcc_unreachable ();
9306 /* Return an RTL expression representing the value of the return address
9307 for the frame COUNT steps up from the current frame. FRAME is the
9308 frame pointer of that frame. */
9311 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9313 int offset;
9314 rtx addr;
9316 /* Without backchain, we fail for all but the current frame. */
9318 if (!TARGET_BACKCHAIN && count > 0)
9319 return NULL_RTX;
9321 /* For the current frame, we need to make sure the initial
9322 value of RETURN_REGNUM is actually saved. */
9324 if (count == 0)
9326 /* On non-z architectures branch splitting could overwrite r14. */
9327 if (TARGET_CPU_ZARCH)
9328 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9329 else
9331 cfun_frame_layout.save_return_addr_p = true;
9332 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9336 if (TARGET_PACKED_STACK)
9337 offset = -2 * UNITS_PER_LONG;
9338 else
9339 offset = RETURN_REGNUM * UNITS_PER_LONG;
9341 addr = plus_constant (Pmode, frame, offset);
9342 addr = memory_address (Pmode, addr);
9343 return gen_rtx_MEM (Pmode, addr);
9346 /* Return an RTL expression representing the back chain stored in
9347 the current stack frame. */
9350 s390_back_chain_rtx (void)
9352 rtx chain;
9354 gcc_assert (TARGET_BACKCHAIN);
9356 if (TARGET_PACKED_STACK)
9357 chain = plus_constant (Pmode, stack_pointer_rtx,
9358 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9359 else
9360 chain = stack_pointer_rtx;
9362 chain = gen_rtx_MEM (Pmode, chain);
9363 return chain;
9366 /* Find the first call-clobbered register unused in a function.
9367 This could be used as a base register in a leaf function
9368 or for holding the return address before the epilogue. */
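/* Only r0-r5 are considered; if all of them are live, 0 is returned as
   a fallback.  */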
9370 static int
9371 find_unused_clobbered_reg (void)
9373 int i;
9374 for (i = 0; i < 6; i++)
9375 if (!df_regs_ever_live_p (i))
9376 return i;
9377 return 0;
9381 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9382 clobbered hard regs in SETREG. */
9384 static void
9385 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9387 char *regs_ever_clobbered = (char *)data;
9388 unsigned int i, regno;
9389 machine_mode mode = GET_MODE (setreg);
9391 if (GET_CODE (setreg) == SUBREG)
9393 rtx inner = SUBREG_REG (setreg);
9394 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9395 return;
9396 regno = subreg_regno (setreg);
9398 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9399 regno = REGNO (setreg);
9400 else
9401 return;
9403 for (i = regno;
9404 i < regno + HARD_REGNO_NREGS (regno, mode);
9405 i++)
9406 regs_ever_clobbered[i] = 1;
9409 /* Walks through all basic blocks of the current function looking
9410 for clobbered hard regs using s390_reg_clobbered_rtx. The entries
9411 of the passed char array REGS_EVER_CLOBBERED are set to one for
9412 each of those regs. */
9414 static void
9415 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9417 basic_block cur_bb;
9418 rtx_insn *cur_insn;
9419 unsigned int i;
9421 memset (regs_ever_clobbered, 0, 32);
9423 /* For non-leaf functions we have to consider all call clobbered regs to be
9424 clobbered. */
9425 if (!crtl->is_leaf)
9427 for (i = 0; i < 32; i++)
9428 regs_ever_clobbered[i] = call_really_used_regs[i];
9431 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9432 this work is done by liveness analysis (mark_regs_live_at_end).
9433 Special care is needed for functions containing landing pads. Landing pads
9434 may use the eh registers, but the code which sets these registers is not
9435 contained in that function. Hence s390_regs_ever_clobbered is not able to
9436 deal with this automatically. */
9437 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9438 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9439 if (crtl->calls_eh_return
9440 || (cfun->machine->has_landing_pad_p
9441 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9442 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9444 /* For nonlocal gotos all call-saved registers have to be saved.
9445 This flag is also set for the unwinding code in libgcc.
9446 See expand_builtin_unwind_init. For regs_ever_live this is done by
9447 reload. */
9448 if (crtl->saves_all_registers)
9449 for (i = 0; i < 32; i++)
9450 if (!call_really_used_regs[i])
9451 regs_ever_clobbered[i] = 1;
9453 FOR_EACH_BB_FN (cur_bb, cfun)
9455 FOR_BB_INSNS (cur_bb, cur_insn)
9457 rtx pat;
9459 if (!INSN_P (cur_insn))
9460 continue;
9462 pat = PATTERN (cur_insn);
9464 /* Ignore GPR restore insns. */
9465 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9467 if (GET_CODE (pat) == SET
9468 && GENERAL_REG_P (SET_DEST (pat)))
9470 /* lgdr */
9471 if (GET_MODE (SET_SRC (pat)) == DImode
9472 && FP_REG_P (SET_SRC (pat)))
9473 continue;
9475 /* l / lg */
9476 if (GET_CODE (SET_SRC (pat)) == MEM)
9477 continue;
9480 /* lm / lmg */
9481 if (GET_CODE (pat) == PARALLEL
9482 && load_multiple_operation (pat, VOIDmode))
9483 continue;
9486 note_stores (pat,
9487 s390_reg_clobbered_rtx,
9488 regs_ever_clobbered);
9493 /* Determine the frame area which actually has to be accessed
9494 in the function epilogue. The values are stored at the
9495 given pointers AREA_BOTTOM (address of the lowest used stack
9496 address) and AREA_TOP (address of the first item which does
9497 not belong to the stack frame). */
9499 static void
9500 s390_frame_area (int *area_bottom, int *area_top)
9502 int b, t;
9504 b = INT_MAX;
9505 t = INT_MIN;
9507 if (cfun_frame_layout.first_restore_gpr != -1)
9509 b = (cfun_frame_layout.gprs_offset
9510 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9511 t = b + (cfun_frame_layout.last_restore_gpr
9512 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9515 if (TARGET_64BIT && cfun_save_high_fprs_p)
9517 b = MIN (b, cfun_frame_layout.f8_offset);
9518 t = MAX (t, (cfun_frame_layout.f8_offset
9519 + cfun_frame_layout.high_fprs * 8));
9522 if (!TARGET_64BIT)
9524 if (cfun_fpr_save_p (FPR4_REGNUM))
9526 b = MIN (b, cfun_frame_layout.f4_offset);
9527 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9529 if (cfun_fpr_save_p (FPR6_REGNUM))
9531 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9532 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9535 *area_bottom = b;
9536 *area_top = t;
9538 /* Update gpr_save_slots in the frame layout trying to make use of
9539 FPRs as GPR save slots.
9540 This is a helper routine of s390_register_info. */
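/* The idea: on z10 and later, a leaf function can stash call-saved GPRs
   in otherwise unused call-clobbered FPRs (via ldgr/lgdr) instead of
   spilling them to the stack, avoiding the stm/lm in the prologue and
   epilogue.  */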
9542 static void
9543 s390_register_info_gprtofpr ()
9545 int save_reg_slot = FPR0_REGNUM;
9546 int i, j;
9548 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9549 return;
9551 for (i = 15; i >= 6; i--)
9553 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9554 continue;
9556 /* Advance to the next FP register which can be used as a
9557 GPR save slot. */
9558 while ((!call_really_used_regs[save_reg_slot]
9559 || df_regs_ever_live_p (save_reg_slot)
9560 || cfun_fpr_save_p (save_reg_slot))
9561 && FP_REGNO_P (save_reg_slot))
9562 save_reg_slot++;
9563 if (!FP_REGNO_P (save_reg_slot))
9565 /* We only want to use ldgr/lgdr if we can get rid of
9566 stm/lm entirely. So undo the gpr slot allocation in
9567 case we ran out of FPR save slots. */
9568 for (j = 6; j <= 15; j++)
9569 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9570 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9571 break;
9573 cfun_gpr_save_slot (i) = save_reg_slot++;
9577 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9578 stdarg.
9579 This is a helper routine for s390_register_info. */
9581 static void
9582 s390_register_info_stdarg_fpr ()
9584 int i;
9585 int min_fpr;
9586 int max_fpr;
9588 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9589 f0-f4 for 64 bit. */
9590 if (!cfun->stdarg
9591 || !TARGET_HARD_FLOAT
9592 || !cfun->va_list_fpr_size
9593 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9594 return;
9596 min_fpr = crtl->args.info.fprs;
9597 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9598 if (max_fpr >= FP_ARG_NUM_REG)
9599 max_fpr = FP_ARG_NUM_REG - 1;
9601 /* FPR argument regs start at f0. */
9602 min_fpr += FPR0_REGNUM;
9603 max_fpr += FPR0_REGNUM;
9605 for (i = min_fpr; i <= max_fpr; i++)
9606 cfun_set_fpr_save (i);
9609 /* Reserve the GPR save slots for GPRs which need to be saved due to
9610 stdarg.
9611 This is a helper routine for s390_register_info. */
9613 static void
9614 s390_register_info_stdarg_gpr ()
9616 int i;
9617 int min_gpr;
9618 int max_gpr;
9620 if (!cfun->stdarg
9621 || !cfun->va_list_gpr_size
9622 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9623 return;
9625 min_gpr = crtl->args.info.gprs;
9626 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9627 if (max_gpr >= GP_ARG_NUM_REG)
9628 max_gpr = GP_ARG_NUM_REG - 1;
9630 /* GPR argument regs start at r2. */
9631 min_gpr += GPR2_REGNUM;
9632 max_gpr += GPR2_REGNUM;
9634 /* If r6 was supposed to be saved into an FPR and now needs to go to
9635 the stack for vararg, we have to adjust the restore range to make
9636 sure that the restore is done from the stack as well. */
9637 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9638 && min_gpr <= GPR6_REGNUM
9639 && max_gpr >= GPR6_REGNUM)
9641 if (cfun_frame_layout.first_restore_gpr == -1
9642 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9643 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9644 if (cfun_frame_layout.last_restore_gpr == -1
9645 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9646 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9649 if (cfun_frame_layout.first_save_gpr == -1
9650 || cfun_frame_layout.first_save_gpr > min_gpr)
9651 cfun_frame_layout.first_save_gpr = min_gpr;
9653 if (cfun_frame_layout.last_save_gpr == -1
9654 || cfun_frame_layout.last_save_gpr < max_gpr)
9655 cfun_frame_layout.last_save_gpr = max_gpr;
9657 for (i = min_gpr; i <= max_gpr; i++)
9658 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9661 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9662 prologue and epilogue. */
9664 static void
9665 s390_register_info_set_ranges ()
9667 int i, j;
9669 /* Find the first and the last save slot supposed to use the stack
9670 to set the restore range.
9671 Vararg regs might be marked for saving to the stack, but only the
9672 call-saved regs really need restoring (i.e. r6). This code
9673 assumes that the vararg regs have not yet been recorded in
9674 cfun_gpr_save_slot. */
9675 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9676 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9677 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9678 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9679 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9680 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9683 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9684 for registers which need to be saved in function prologue.
9685 This function can be used until the insns emitted for save/restore
9686 of the regs are visible in the RTL stream. */
9688 static void
9689 s390_register_info ()
9691 int i;
9692 char clobbered_regs[32];
9694 gcc_assert (!epilogue_completed);
9696 if (reload_completed)
9697 /* After reload we rely on our own routine to determine which
9698 registers need saving. */
9699 s390_regs_ever_clobbered (clobbered_regs);
9700 else
9701 /* During reload we use regs_ever_live as a base since reload
9702 does changes in there which we otherwise would not be aware
9703 of. */
9704 for (i = 0; i < 32; i++)
9705 clobbered_regs[i] = df_regs_ever_live_p (i);
9707 for (i = 0; i < 32; i++)
9708 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9710 /* Mark the call-saved FPRs which need to be saved.
9711 This needs to be done before checking the special GPRs since the
9712 stack pointer usage depends on whether high FPRs have to be saved
9713 or not. */
9714 cfun_frame_layout.fpr_bitmap = 0;
9715 cfun_frame_layout.high_fprs = 0;
9716 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9717 if (clobbered_regs[i] && !call_really_used_regs[i])
9719 cfun_set_fpr_save (i);
9720 if (i >= FPR8_REGNUM)
9721 cfun_frame_layout.high_fprs++;
9724 /* Register 12 is used for the GOT address, but also as a temp in the
9725 prologue for split-stack stdarg functions (unless r14 is available). */
9726 clobbered_regs[12]
9727 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9728 || (flag_split_stack && cfun->stdarg
9729 && (crtl->is_leaf || TARGET_TPF_PROFILING
9730 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9732 clobbered_regs[BASE_REGNUM]
9733 |= (cfun->machine->base_reg
9734 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9736 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9737 |= !!frame_pointer_needed;
9739 /* On pre-z900 machines this might take until machine-dependent
9740 reorg to decide.
9741 save_return_addr_p will only be set on non-zarch machines so
9742 there is no risk that r14 goes into an FPR instead of a stack
9743 slot. */
9744 clobbered_regs[RETURN_REGNUM]
9745 |= (!crtl->is_leaf
9746 || TARGET_TPF_PROFILING
9747 || cfun->machine->split_branches_pending_p
9748 || cfun_frame_layout.save_return_addr_p
9749 || crtl->calls_eh_return);
9751 clobbered_regs[STACK_POINTER_REGNUM]
9752 |= (!crtl->is_leaf
9753 || TARGET_TPF_PROFILING
9754 || cfun_save_high_fprs_p
9755 || get_frame_size () > 0
9756 || (reload_completed && cfun_frame_layout.frame_size > 0)
9757 || cfun->calls_alloca);
9759 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9761 for (i = 6; i < 16; i++)
9762 if (clobbered_regs[i])
9763 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9765 s390_register_info_stdarg_fpr ();
9766 s390_register_info_gprtofpr ();
9767 s390_register_info_set_ranges ();
9768 /* stdarg functions might need to save GPRs 2 to 6. This might
9769 override the GPR->FPR save decision made by
9770 s390_register_info_gprtofpr for r6 since vararg regs must go to
9771 the stack. */
9772 s390_register_info_stdarg_gpr ();
9775 /* This function is called by s390_optimize_prologue in order to get
9776 rid of unnecessary GPR save/restore instructions. The register info
9777 for the GPRs is re-computed and the ranges are re-calculated. */
9779 static void
9780 s390_optimize_register_info ()
9782 char clobbered_regs[32];
9783 int i;
9785 gcc_assert (epilogue_completed);
9786 gcc_assert (!cfun->machine->split_branches_pending_p);
9788 s390_regs_ever_clobbered (clobbered_regs);
9790 for (i = 0; i < 32; i++)
9791 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9793 /* There is still special treatment needed for cases invisible to
9794 s390_regs_ever_clobbered. */
9795 clobbered_regs[RETURN_REGNUM]
9796 |= (TARGET_TPF_PROFILING
9797 /* When expanding builtin_return_addr in ESA mode we do not
9798 know whether r14 will later be needed as scratch reg when
9799 doing branch splitting. So the builtin always accesses the
9800 r14 save slot and we need to stick to the save/restore
9801 decision for r14 even if it turns out that it didn't get
9802 clobbered. */
9803 || cfun_frame_layout.save_return_addr_p
9804 || crtl->calls_eh_return);
9806 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9808 for (i = 6; i < 16; i++)
9809 if (!clobbered_regs[i])
9810 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9812 s390_register_info_set_ranges ();
9813 s390_register_info_stdarg_gpr ();
9816 /* Fill cfun->machine with info about the frame of the current function. */
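/* Three stack layouts are handled below: the fixed layout
   (!TARGET_PACKED_STACK), the packed kernel layout with backchain and
   without FPR saves, and the packed layout without backchain.  */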
9818 static void
9819 s390_frame_info (void)
9821 HOST_WIDE_INT lowest_offset;
9823 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9824 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9826 /* The va_arg builtin uses a constant distance of 16 *
9827 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9828 pointer. So even if we are going to save the stack pointer in an
9829 FPR we need the stack space in order to keep the offsets
9830 correct. */
9831 if (cfun->stdarg && cfun_save_arg_fprs_p)
9833 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9835 if (cfun_frame_layout.first_save_gpr_slot == -1)
9836 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9839 cfun_frame_layout.frame_size = get_frame_size ();
9840 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9841 fatal_error (input_location,
9842 "total size of local variables exceeds architecture limit");
9844 if (!TARGET_PACKED_STACK)
9846 /* Fixed stack layout. */
9847 cfun_frame_layout.backchain_offset = 0;
9848 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9849 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9850 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9851 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9852 * UNITS_PER_LONG);
9854 else if (TARGET_BACKCHAIN)
9856 /* Kernel stack layout - packed stack, backchain, no float */
9857 gcc_assert (TARGET_SOFT_FLOAT);
9858 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9859 - UNITS_PER_LONG);
9861 /* The distance between the backchain and the return address
9862 save slot must not change. So we always need a slot for the
9863 stack pointer which resides in between. */
9864 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9866 cfun_frame_layout.gprs_offset
9867 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9869 /* FPRs will not be saved. Nevertheless pick sane values to
9870 keep area calculations valid. */
9871 cfun_frame_layout.f0_offset =
9872 cfun_frame_layout.f4_offset =
9873 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9875 else
9877 int num_fprs;
9879 /* Packed stack layout without backchain. */
9881 /* With stdarg FPRs need their dedicated slots. */
9882 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9883 : (cfun_fpr_save_p (FPR4_REGNUM) +
9884 cfun_fpr_save_p (FPR6_REGNUM)));
9885 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9887 num_fprs = (cfun->stdarg ? 2
9888 : (cfun_fpr_save_p (FPR0_REGNUM)
9889 + cfun_fpr_save_p (FPR2_REGNUM)));
9890 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9892 cfun_frame_layout.gprs_offset
9893 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9895 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9896 - cfun_frame_layout.high_fprs * 8);
9899 if (cfun_save_high_fprs_p)
9900 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9902 if (!crtl->is_leaf)
9903 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9905 /* In the following cases we have to allocate an area of
9906 STACK_POINTER_OFFSET bytes at the bottom of the stack. This is also
9907 required for leaf functions. When GCC generates a local stack
9908 reference, it always adds STACK_POINTER_OFFSET to it. */
9909 if (crtl->is_leaf
9910 && !TARGET_TPF_PROFILING
9911 && cfun_frame_layout.frame_size == 0
9912 && !cfun->calls_alloca)
9913 return;
9915 /* Calculate the number of bytes we have used in our own register
9916 save area. With the packed stack layout we can re-use the
9917 remaining bytes for normal stack elements. */
9919 if (TARGET_PACKED_STACK)
9920 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9921 cfun_frame_layout.f4_offset),
9922 cfun_frame_layout.gprs_offset);
9923 else
9924 lowest_offset = 0;
9926 if (TARGET_BACKCHAIN)
9927 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9929 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9931 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to
9932 adjust the frame size to maintain 8-byte alignment of stack
9933 frames. */
9934 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9935 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9936 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9939 /* Generate frame layout. Fills in register and frame data for the current
9940 function in cfun->machine. This routine can be called multiple times;
9941 it will re-do the complete frame layout every time. */
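/* Note that the computation below iterates: whether a literal pool base
   register is needed depends on the frame size, and reserving (or
   dropping) that register in turn changes the frame size, so the layout
   is recomputed until the frame size settles.  */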
9943 static void
9944 s390_init_frame_layout (void)
9946 HOST_WIDE_INT frame_size;
9947 int base_used;
9949 /* After LRA the frame layout is supposed to be read-only and should
9950 not be re-computed. */
9951 if (reload_completed)
9952 return;
9954 /* On S/390 machines, we may need to perform branch splitting, which
9955 will require both the base and the return address register. We have no
9956 choice but to assume we're going to need them until right at the
9957 end of the machine-dependent reorg phase. */
9958 if (!TARGET_CPU_ZARCH)
9959 cfun->machine->split_branches_pending_p = true;
9963 frame_size = cfun_frame_layout.frame_size;
9965 /* Try to predict whether we'll need the base register. */
9966 base_used = cfun->machine->split_branches_pending_p
9967 || crtl->uses_const_pool
9968 || (!DISP_IN_RANGE (frame_size)
9969 && !CONST_OK_FOR_K (frame_size));
9971 /* Decide which register to use as literal pool base. In small
9972 leaf functions, try to use an unused call-clobbered register
9973 as base register to avoid save/restore overhead. */
9974 if (!base_used)
9975 cfun->machine->base_reg = NULL_RTX;
9976 else
9978 int br = 0;
9980 if (crtl->is_leaf)
9981 /* Prefer r5 (most likely to be free). */
9982 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9984 cfun->machine->base_reg =
9985 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
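/* If the function is not a leaf, or if r5 down to r2 are all live, the
   code above falls back to the dedicated BASE_REGNUM.  */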
9988 s390_register_info ();
9989 s390_frame_info ();
9991 while (frame_size != cfun_frame_layout.frame_size);
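/* The do/while loop above iterates until the layout reaches a fixed
   point: choosing a literal pool base register can change the frame
   size, which in turn can change whether a base register is needed, so
   we recompute until the frame size no longer changes.  */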
9994 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9995 the TX is nonescaping. A transaction is considered escaping if
9996 there is at least one path from tbegin returning CC0 to the
9997 function exit block without a tend.
9999 The check so far has some limitations:
10000 - only single tbegin/tend BBs are supported
10001 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10002 - when CC is copied to a GPR and the CC0 check is done on the GPR,
10003 this is not supported.
10006 static void
10007 s390_optimize_nonescaping_tx (void)
10009 const unsigned int CC0 = 1 << 3;
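/* In the 4 bit condition code mask used with CCRAWmode comparisons CC0
   is represented by the most significant bit, hence the value 1 << 3.  */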
10010 basic_block tbegin_bb = NULL;
10011 basic_block tend_bb = NULL;
10012 basic_block bb;
10013 rtx_insn *insn;
10014 bool result = true;
10015 int bb_index;
10016 rtx_insn *tbegin_insn = NULL;
10018 if (!cfun->machine->tbegin_p)
10019 return;
10021 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
10023 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
10025 if (!bb)
10026 continue;
10028 FOR_BB_INSNS (bb, insn)
10030 rtx ite, cc, pat, target;
10031 unsigned HOST_WIDE_INT mask;
10033 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10034 continue;
10036 pat = PATTERN (insn);
10038 if (GET_CODE (pat) == PARALLEL)
10039 pat = XVECEXP (pat, 0, 0);
10041 if (GET_CODE (pat) != SET
10042 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10043 continue;
10045 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10047 rtx_insn *tmp;
10049 tbegin_insn = insn;
10051 /* Just return if the tbegin doesn't have clobbers. */
10052 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10053 return;
10055 if (tbegin_bb != NULL)
10056 return;
10058 /* Find the next conditional jump. */
10059 for (tmp = NEXT_INSN (insn);
10060 tmp != NULL_RTX;
10061 tmp = NEXT_INSN (tmp))
10063 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10064 return;
10065 if (!JUMP_P (tmp))
10066 continue;
10068 ite = SET_SRC (PATTERN (tmp));
10069 if (GET_CODE (ite) != IF_THEN_ELSE)
10070 continue;
10072 cc = XEXP (XEXP (ite, 0), 0);
10073 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10074 || GET_MODE (cc) != CCRAWmode
10075 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10076 return;
10078 if (bb->succs->length () != 2)
10079 return;
10081 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10082 if (GET_CODE (XEXP (ite, 0)) == NE)
10083 mask ^= 0xf;
10085 if (mask == CC0)
10086 target = XEXP (ite, 1);
10087 else if (mask == (CC0 ^ 0xf))
10088 target = XEXP (ite, 2);
10089 else
10090 return;
10093 edge_iterator ei;
10094 edge e1, e2;
10096 ei = ei_start (bb->succs);
10097 e1 = ei_safe_edge (ei);
10098 ei_next (&ei);
10099 e2 = ei_safe_edge (ei);
10101 if (e2->flags & EDGE_FALLTHRU)
10103 e2 = e1;
10104 e1 = ei_safe_edge (ei);
10107 if (!(e1->flags & EDGE_FALLTHRU))
10108 return;
10110 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10112 if (tmp == BB_END (bb))
10113 break;
10117 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10119 if (tend_bb != NULL)
10120 return;
10121 tend_bb = bb;
10126 /* Either we successfully remove the FPR clobbers here or we are not
10127 able to do anything for this TX. Neither case qualifies for
10128 another look. */
10129 cfun->machine->tbegin_p = false;
10131 if (tbegin_bb == NULL || tend_bb == NULL)
10132 return;
10134 calculate_dominance_info (CDI_POST_DOMINATORS);
10135 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10136 free_dominance_info (CDI_POST_DOMINATORS);
10138 if (!result)
10139 return;
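/* Rebuild the tbegin PARALLEL from just its first two elements; the
   trailing FPR clobbers are dropped, which is the whole point of this
   function.  */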
10141 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10142 gen_rtvec (2,
10143 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10144 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10145 INSN_CODE (tbegin_insn) = -1;
10146 df_insn_rescan (tbegin_insn);
10148 return;
10151 /* Return true if it is legal to put a value of mode MODE into register REGNO. */
10153 bool
10154 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10156 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10157 return false;
10159 switch (REGNO_REG_CLASS (regno))
10161 case VEC_REGS:
10162 return ((GET_MODE_CLASS (mode) == MODE_INT
10163 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10164 || mode == DFmode
10165 || s390_vector_mode_supported_p (mode));
10166 break;
10167 case FP_REGS:
10168 if (TARGET_VX
10169 && ((GET_MODE_CLASS (mode) == MODE_INT
10170 && s390_class_max_nregs (FP_REGS, mode) == 1)
10171 || mode == DFmode
10172 || s390_vector_mode_supported_p (mode)))
10173 return true;
10175 if (REGNO_PAIR_OK (regno, mode))
10177 if (mode == SImode || mode == DImode)
10178 return true;
10180 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10181 return true;
10183 break;
10184 case ADDR_REGS:
10185 if (FRAME_REGNO_P (regno) && mode == Pmode)
10186 return true;
10188 /* fallthrough */
10189 case GENERAL_REGS:
10190 if (REGNO_PAIR_OK (regno, mode))
10192 if (TARGET_ZARCH
10193 || (mode != TFmode && mode != TCmode && mode != TDmode))
10194 return true;
10196 break;
10197 case CC_REGS:
10198 if (GET_MODE_CLASS (mode) == MODE_CC)
10199 return true;
10200 break;
10201 case ACCESS_REGS:
10202 if (REGNO_PAIR_OK (regno, mode))
10204 if (mode == SImode || mode == Pmode)
10205 return true;
10207 break;
10208 default:
10209 return false;
10212 return false;
10215 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10217 bool
10218 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10220 /* Once we've decided upon a register to use as base register, it must
10221 no longer be used for any other purpose. */
10222 if (cfun->machine->base_reg)
10223 if (REGNO (cfun->machine->base_reg) == old_reg
10224 || REGNO (cfun->machine->base_reg) == new_reg)
10225 return false;
10227 /* Prevent regrename from using call-saved regs which haven't
10228 actually been saved. This is necessary since regrename assumes
10229 the backend save/restore decisions are based on
10230 df_regs_ever_live. Since we have our own routine we have to tell
10231 regrename manually about it. */
10232 if (GENERAL_REGNO_P (new_reg)
10233 && !call_really_used_regs[new_reg]
10234 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10235 return false;
10237 return true;
10240 /* Return nonzero if register REGNO can be used as a scratch register
10241 in peephole2. */
10243 static bool
10244 s390_hard_regno_scratch_ok (unsigned int regno)
10246 /* See s390_hard_regno_rename_ok. */
10247 if (GENERAL_REGNO_P (regno)
10248 && !call_really_used_regs[regno]
10249 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10250 return false;
10252 return true;
10255 /* Maximum number of registers to represent a value of mode MODE
10256 in a register of class RCLASS. */
10259 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10261 int reg_size;
10262 bool reg_pair_required_p = false;
10264 switch (rclass)
10266 case FP_REGS:
10267 case VEC_REGS:
10268 reg_size = TARGET_VX ? 16 : 8;
10270 /* TF and TD modes would fit into a VR but we put them into a
10271 register pair since we do not have 128bit FP instructions on
10272 full VRs. */
10273 if (TARGET_VX
10274 && SCALAR_FLOAT_MODE_P (mode)
10275 && GET_MODE_SIZE (mode) >= 16)
10276 reg_pair_required_p = true;
10278 /* Even if complex types would fit into a single FPR/VR we force
10279 them into a register pair to deal with the parts more easily.
10280 (FIXME: What about complex ints?) */
10281 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10282 reg_pair_required_p = true;
10283 break;
10284 case ACCESS_REGS:
10285 reg_size = 4;
10286 break;
10287 default:
10288 reg_size = UNITS_PER_WORD;
10289 break;
10292 if (reg_pair_required_p)
10293 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10295 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
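/* Illustrative examples: without the vector facility a TFmode value
   (16 bytes) in FP_REGS needs (16 + 8 - 1) / 8 == 2 FPRs; with TARGET_VX
   the reg_pair_required_p path still yields 2 * ((16 / 2 + 15) / 16) == 2
   registers, i.e. a register pair.  */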
10298 /* Return TRUE if changing mode from FROM to TO should not be allowed
10299 for register class CLASS. */
10302 s390_cannot_change_mode_class (machine_mode from_mode,
10303 machine_mode to_mode,
10304 enum reg_class rclass)
10306 machine_mode small_mode;
10307 machine_mode big_mode;
10309 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10310 return 0;
10312 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10314 small_mode = from_mode;
10315 big_mode = to_mode;
10317 else
10319 small_mode = to_mode;
10320 big_mode = from_mode;
10323 /* Values residing in VRs are little-endian style. All modes are
10324 placed left-aligned in a VR. This means that we cannot allow
10325 switching between modes with differing sizes. Also if the vector
10326 facility is available we still place TFmode values in VR register
10327 pairs, since the instructions we have operating on TFmodes
10328 only deal with register pairs. Therefore we have to allow DFmode
10329 subregs of TFmodes to enable the TFmode splitters. */
10330 if (reg_classes_intersect_p (VEC_REGS, rclass)
10331 && (GET_MODE_SIZE (small_mode) < 8
10332 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10333 return 1;
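/* Illustrative example: a DFmode (8 byte) subreg of a TFmode value is
   not rejected here, which the TFmode splitters rely on, whereas e.g. an
   SImode subreg of a V4SImode value is rejected since the smaller mode
   is below 8 bytes.  */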
10335 /* Likewise for access registers, since they have only half the
10336 word size on 64-bit. */
10337 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10338 return 1;
10340 return 0;
10343 /* Return true if we use LRA instead of reload pass. */
10344 static bool
10345 s390_lra_p (void)
10347 return s390_lra_flag;
10350 /* Return true if register FROM can be eliminated via register TO. */
10352 static bool
10353 s390_can_eliminate (const int from, const int to)
10355 /* On zSeries machines, we have not marked the base register as fixed.
10356 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10357 If a function requires the base register, we say here that this
10358 elimination cannot be performed. This will cause reload to free
10359 up the base register (as if it were fixed). On the other hand,
10360 if the current function does *not* require the base register, we
10361 say here the elimination succeeds, which in turn allows reload
10362 to allocate the base register for any other purpose. */
10363 if (from == BASE_REGNUM && to == BASE_REGNUM)
10365 if (TARGET_CPU_ZARCH)
10367 s390_init_frame_layout ();
10368 return cfun->machine->base_reg == NULL_RTX;
10371 return false;
10374 /* Everything else must point into the stack frame. */
10375 gcc_assert (to == STACK_POINTER_REGNUM
10376 || to == HARD_FRAME_POINTER_REGNUM);
10378 gcc_assert (from == FRAME_POINTER_REGNUM
10379 || from == ARG_POINTER_REGNUM
10380 || from == RETURN_ADDRESS_POINTER_REGNUM);
10382 /* Make sure we actually saved the return address. */
10383 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10384 if (!crtl->calls_eh_return
10385 && !cfun->stdarg
10386 && !cfun_frame_layout.save_return_addr_p)
10387 return false;
10389 return true;
10392 /* Return offset between register FROM and TO initially after prolog. */
10394 HOST_WIDE_INT
10395 s390_initial_elimination_offset (int from, int to)
10397 HOST_WIDE_INT offset;
10399 /* ??? Why are we called for non-eliminable pairs? */
10400 if (!s390_can_eliminate (from, to))
10401 return 0;
10403 switch (from)
10405 case FRAME_POINTER_REGNUM:
10406 offset = (get_frame_size()
10407 + STACK_POINTER_OFFSET
10408 + crtl->outgoing_args_size);
10409 break;
10411 case ARG_POINTER_REGNUM:
10412 s390_init_frame_layout ();
10413 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10414 break;
10416 case RETURN_ADDRESS_POINTER_REGNUM:
10417 s390_init_frame_layout ();
10419 if (cfun_frame_layout.first_save_gpr_slot == -1)
10421 /* If it turns out that for stdarg nothing went into the reg
10422 save area we also do not need the return address
10423 pointer. */
10424 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10425 return 0;
10427 gcc_unreachable ();
10430 /* In order to make the following work it is not necessary for
10431 r14 to have a save slot. It is sufficient if one other GPR
10432 got one. Since the GPRs are always stored without gaps we
10433 are able to calculate where the r14 save slot would
10434 reside. */
10435 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10436 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10437 UNITS_PER_LONG);
10438 break;
10440 case BASE_REGNUM:
10441 offset = 0;
10442 break;
10444 default:
10445 gcc_unreachable ();
10448 return offset;
10451 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10452 to register BASE. Return generated insn. */
10454 static rtx
10455 save_fpr (rtx base, int offset, int regnum)
10457 rtx addr;
10458 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10460 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10461 set_mem_alias_set (addr, get_varargs_alias_set ());
10462 else
10463 set_mem_alias_set (addr, get_frame_alias_set ());
10465 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10468 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10469 to register BASE. Return generated insn. */
10471 static rtx
10472 restore_fpr (rtx base, int offset, int regnum)
10474 rtx addr;
10475 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10476 set_mem_alias_set (addr, get_frame_alias_set ());
10478 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10481 /* Return true if REGNO is a global register, but not one
10482 of the special ones that need to be saved/restored anyway. */
10484 static inline bool
10485 global_not_special_regno_p (int regno)
10487 return (global_regs[regno]
10488 /* These registers are special and need to be
10489 restored in any case. */
10490 && !(regno == STACK_POINTER_REGNUM
10491 || regno == RETURN_REGNUM
10492 || regno == BASE_REGNUM
10493 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10496 /* Generate insn to save registers FIRST to LAST into
10497 the register save area located at offset OFFSET
10498 relative to register BASE. */
10500 static rtx
10501 save_gprs (rtx base, int offset, int first, int last)
10503 rtx addr, insn, note;
10504 int i;
10506 addr = plus_constant (Pmode, base, offset);
10507 addr = gen_rtx_MEM (Pmode, addr);
10509 set_mem_alias_set (addr, get_frame_alias_set ());
10511 /* Special-case single register. */
10512 if (first == last)
10514 if (TARGET_64BIT)
10515 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10516 else
10517 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10519 if (!global_not_special_regno_p (first))
10520 RTX_FRAME_RELATED_P (insn) = 1;
10521 return insn;
10525 insn = gen_store_multiple (addr,
10526 gen_rtx_REG (Pmode, first),
10527 GEN_INT (last - first + 1));
10529 if (first <= 6 && cfun->stdarg)
10530 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10532 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10534 if (first + i <= 6)
10535 set_mem_alias_set (mem, get_varargs_alias_set ());
10538 /* We need to set the FRAME_RELATED flag on all SETs
10539 inside the store-multiple pattern.
10541 However, we must not emit DWARF records for registers 2..5
10542 if they are stored for use by variable arguments ...
10544 ??? Unfortunately, it is not enough to simply not set the
10545 FRAME_RELATED flags for those SETs, because the first SET
10546 of the PARALLEL is always treated as if it had the flag
10547 set, even if it does not. Therefore we emit a new pattern
10548 without those registers as REG_FRAME_RELATED_EXPR note. */
10550 if (first >= 6 && !global_not_special_regno_p (first))
10552 rtx pat = PATTERN (insn);
10554 for (i = 0; i < XVECLEN (pat, 0); i++)
10555 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10556 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10557 0, i)))))
10558 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10560 RTX_FRAME_RELATED_P (insn) = 1;
10562 else if (last >= 6)
10564 int start;
10566 for (start = first >= 6 ? first : 6; start <= last; start++)
10567 if (!global_not_special_regno_p (start))
10568 break;
10570 if (start > last)
10571 return insn;
10573 addr = plus_constant (Pmode, base,
10574 offset + (start - first) * UNITS_PER_LONG);
10576 if (start == last)
10578 if (TARGET_64BIT)
10579 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10580 gen_rtx_REG (Pmode, start));
10581 else
10582 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10583 gen_rtx_REG (Pmode, start));
10584 note = PATTERN (note);
10586 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10587 RTX_FRAME_RELATED_P (insn) = 1;
10589 return insn;
10592 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10593 gen_rtx_REG (Pmode, start),
10594 GEN_INT (last - start + 1));
10595 note = PATTERN (note);
10597 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10599 for (i = 0; i < XVECLEN (note, 0); i++)
10600 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10601 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10602 0, i)))))
10603 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10605 RTX_FRAME_RELATED_P (insn) = 1;
10608 return insn;
10611 /* Generate insn to restore registers FIRST to LAST from
10612 the register save area located at offset OFFSET
10613 relative to register BASE. */
10615 static rtx
10616 restore_gprs (rtx base, int offset, int first, int last)
10618 rtx addr, insn;
10620 addr = plus_constant (Pmode, base, offset);
10621 addr = gen_rtx_MEM (Pmode, addr);
10622 set_mem_alias_set (addr, get_frame_alias_set ());
10624 /* Special-case single register. */
10625 if (first == last)
10627 if (TARGET_64BIT)
10628 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10629 else
10630 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10632 RTX_FRAME_RELATED_P (insn) = 1;
10633 return insn;
10636 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10637 addr,
10638 GEN_INT (last - first + 1));
10639 RTX_FRAME_RELATED_P (insn) = 1;
10640 return insn;
10643 /* Return insn sequence to load the GOT register. */
10645 static GTY(()) rtx got_symbol;
10646 rtx_insn *
10647 s390_load_got (void)
10649 rtx_insn *insns;
10651 /* We cannot use pic_offset_table_rtx here since we use this
10652 function also for non-pic if __tls_get_offset is called and in
10653 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10654 aren't usable. */
10655 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10657 if (!got_symbol)
10659 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10660 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10663 start_sequence ();
10665 if (TARGET_CPU_ZARCH)
10667 emit_move_insn (got_rtx, got_symbol);
10669 else
10671 rtx offset;
10673 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10674 UNSPEC_LTREL_OFFSET);
10675 offset = gen_rtx_CONST (Pmode, offset);
10676 offset = force_const_mem (Pmode, offset);
10678 emit_move_insn (got_rtx, offset);
10680 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10681 UNSPEC_LTREL_BASE);
10682 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10684 emit_move_insn (got_rtx, offset);
10687 insns = get_insns ();
10688 end_sequence ();
10689 return insns;
10692 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10693 and the change to the stack pointer. */
10695 static void
10696 s390_emit_stack_tie (void)
10698 rtx mem = gen_frame_mem (BLKmode,
10699 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10701 emit_insn (gen_stack_tie (mem));
10704 /* Copy GPRS into FPR save slots. */
10706 static void
10707 s390_save_gprs_to_fprs (void)
10709 int i;
10711 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10712 return;
10714 for (i = 6; i < 16; i++)
10716 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10718 rtx_insn *insn =
10719 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10720 gen_rtx_REG (DImode, i));
10721 RTX_FRAME_RELATED_P (insn) = 1;
10722 /* This prevents dwarf2cfi from interpreting the set. Otherwise
10723 it might emit def_cfa_register notes setting an FPR as the
10724 new CFA. */
10725 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
10730 /* Restore GPRs from FPR save slots. */
10732 static void
10733 s390_restore_gprs_from_fprs (void)
10735 int i;
10737 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10738 return;
10740 for (i = 6; i < 16; i++)
10742 rtx_insn *insn;
10744 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10745 continue;
10747 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10749 if (i == STACK_POINTER_REGNUM)
10750 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10751 else
10752 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10754 df_set_regs_ever_live (i, true);
10755 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10756 if (i == STACK_POINTER_REGNUM)
10757 add_reg_note (insn, REG_CFA_DEF_CFA,
10758 plus_constant (Pmode, stack_pointer_rtx,
10759 STACK_POINTER_OFFSET));
10760 RTX_FRAME_RELATED_P (insn) = 1;
10765 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10766 generation. */
10768 namespace {
10770 const pass_data pass_data_s390_early_mach =
10772 RTL_PASS, /* type */
10773 "early_mach", /* name */
10774 OPTGROUP_NONE, /* optinfo_flags */
10775 TV_MACH_DEP, /* tv_id */
10776 0, /* properties_required */
10777 0, /* properties_provided */
10778 0, /* properties_destroyed */
10779 0, /* todo_flags_start */
10780 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10783 class pass_s390_early_mach : public rtl_opt_pass
10785 public:
10786 pass_s390_early_mach (gcc::context *ctxt)
10787 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10790 /* opt_pass methods: */
10791 virtual unsigned int execute (function *);
10793 }; // class pass_s390_early_mach
10795 unsigned int
10796 pass_s390_early_mach::execute (function *fun)
10798 rtx_insn *insn;
10800 /* Try to get rid of the FPR clobbers. */
10801 s390_optimize_nonescaping_tx ();
10803 /* Re-compute register info. */
10804 s390_register_info ();
10806 /* If we're using a base register, ensure that it is always valid for
10807 the first non-prologue instruction. */
10808 if (fun->machine->base_reg)
10809 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10811 /* Annotate all constant pool references to let the scheduler know
10812 they implicitly use the base register. */
10813 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10814 if (INSN_P (insn))
10816 annotate_constant_pool_refs (&PATTERN (insn));
10817 df_insn_rescan (insn);
10819 return 0;
10822 } // anon namespace
10824 /* Expand the prologue into a bunch of separate insns. */
10826 void
10827 s390_emit_prologue (void)
10829 rtx insn, addr;
10830 rtx temp_reg;
10831 int i;
10832 int offset;
10833 int next_fpr = 0;
10835 /* Choose the best register to use as a temporary within the prologue.
10836 TPF with profiling must avoid register 14 - the tracing function
10837 needs the original contents of r14 to be preserved. */
10839 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10840 && !crtl->is_leaf
10841 && !TARGET_TPF_PROFILING)
10842 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10843 else if (flag_split_stack && cfun->stdarg)
10844 temp_reg = gen_rtx_REG (Pmode, 12);
10845 else
10846 temp_reg = gen_rtx_REG (Pmode, 1);
10848 s390_save_gprs_to_fprs ();
10850 /* Save call saved gprs. */
10851 if (cfun_frame_layout.first_save_gpr != -1)
10853 insn = save_gprs (stack_pointer_rtx,
10854 cfun_frame_layout.gprs_offset +
10855 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10856 - cfun_frame_layout.first_save_gpr_slot),
10857 cfun_frame_layout.first_save_gpr,
10858 cfun_frame_layout.last_save_gpr);
10859 emit_insn (insn);
10862 /* Dummy insn to mark literal pool slot. */
10864 if (cfun->machine->base_reg)
10865 emit_insn (gen_main_pool (cfun->machine->base_reg));
10867 offset = cfun_frame_layout.f0_offset;
10869 /* Save f0 and f2. */
10870 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10872 if (cfun_fpr_save_p (i))
10874 save_fpr (stack_pointer_rtx, offset, i);
10875 offset += 8;
10877 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10878 offset += 8;
10881 /* Save f4 and f6. */
10882 offset = cfun_frame_layout.f4_offset;
10883 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10885 if (cfun_fpr_save_p (i))
10887 insn = save_fpr (stack_pointer_rtx, offset, i);
10888 offset += 8;
10890 /* If f4 and f6 are call clobbered they are saved due to
10891 stdargs and therefore are not frame related. */
10892 if (!call_really_used_regs[i])
10893 RTX_FRAME_RELATED_P (insn) = 1;
10895 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10896 offset += 8;
10899 if (TARGET_PACKED_STACK
10900 && cfun_save_high_fprs_p
10901 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10903 offset = (cfun_frame_layout.f8_offset
10904 + (cfun_frame_layout.high_fprs - 1) * 8);
10906 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10907 if (cfun_fpr_save_p (i))
10909 insn = save_fpr (stack_pointer_rtx, offset, i);
10911 RTX_FRAME_RELATED_P (insn) = 1;
10912 offset -= 8;
10914 if (offset >= cfun_frame_layout.f8_offset)
10915 next_fpr = i;
10918 if (!TARGET_PACKED_STACK)
10919 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10921 if (flag_stack_usage_info)
10922 current_function_static_stack_size = cfun_frame_layout.frame_size;
10924 /* Decrement stack pointer. */
10926 if (cfun_frame_layout.frame_size > 0)
10928 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10929 rtx real_frame_off;
10931 if (s390_stack_size)
10933 HOST_WIDE_INT stack_guard;
10935 if (s390_stack_guard)
10936 stack_guard = s390_stack_guard;
10937 else
10939 /* If no value for stack guard is provided the smallest power of 2
10940 larger than the current frame size is chosen. */
10941 stack_guard = 1;
10942 while (stack_guard < cfun_frame_layout.frame_size)
10943 stack_guard <<= 1;
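/* For example, a frame size of 6000 bytes yields a default stack_guard
   of 8192.  */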
10946 if (cfun_frame_layout.frame_size >= s390_stack_size)
10948 warning (0, "frame size of function %qs is %wd"
10949 " bytes exceeding user provided stack limit of "
10950 "%d bytes. "
10951 "An unconditional trap is added.",
10952 current_function_name(), cfun_frame_layout.frame_size,
10953 s390_stack_size);
10954 emit_insn (gen_trap ());
10955 emit_barrier ();
10957 else
10959 /* stack_guard has to be smaller than s390_stack_size.
10960 Otherwise we would emit an AND with zero which would
10961 not match the test under mask pattern. */
10962 if (stack_guard >= s390_stack_size)
10964 warning (0, "frame size of function %qs is %wd"
10965 " bytes which is more than half the stack size. "
10966 "The dynamic check would not be reliable. "
10967 "No check emitted for this function.",
10968 current_function_name(),
10969 cfun_frame_layout.frame_size);
10971 else
10973 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10974 & ~(stack_guard - 1));
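/* Illustrative example: s390_stack_size == 65536 and stack_guard == 4096
   give a mask of 0xf000; the conditional trap emitted below fires when
   ANDing the stack pointer with this mask yields zero.  */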
10976 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10977 GEN_INT (stack_check_mask));
10978 if (TARGET_64BIT)
10979 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10980 t, const0_rtx),
10981 t, const0_rtx, const0_rtx));
10982 else
10983 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10984 t, const0_rtx),
10985 t, const0_rtx, const0_rtx));
10990 if (s390_warn_framesize > 0
10991 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10992 warning (0, "frame size of %qs is %wd bytes",
10993 current_function_name (), cfun_frame_layout.frame_size);
10995 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10996 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10998 /* Save incoming stack pointer into temp reg. */
10999 if (TARGET_BACKCHAIN || next_fpr)
11000 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
11002 /* Subtract frame size from stack pointer. */
11004 if (DISP_IN_RANGE (INTVAL (frame_off)))
11006 insn = gen_rtx_SET (stack_pointer_rtx,
11007 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11008 frame_off));
11009 insn = emit_insn (insn);
11011 else
11013 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11014 frame_off = force_const_mem (Pmode, frame_off);
11016 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
11017 annotate_constant_pool_refs (&PATTERN (insn));
11020 RTX_FRAME_RELATED_P (insn) = 1;
11021 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11022 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11023 gen_rtx_SET (stack_pointer_rtx,
11024 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11025 real_frame_off)));
11027 /* Set backchain. */
11029 if (TARGET_BACKCHAIN)
11031 if (cfun_frame_layout.backchain_offset)
11032 addr = gen_rtx_MEM (Pmode,
11033 plus_constant (Pmode, stack_pointer_rtx,
11034 cfun_frame_layout.backchain_offset));
11035 else
11036 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
11037 set_mem_alias_set (addr, get_frame_alias_set ());
11038 insn = emit_insn (gen_move_insn (addr, temp_reg));
11041 /* If we support non-call exceptions (e.g. for Java),
11042 we need to make sure the backchain pointer is set up
11043 before any possibly trapping memory access. */
11044 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
11046 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
11047 emit_clobber (addr);
11051 /* Save fprs 8 - 15 (64 bit ABI). */
11053 if (cfun_save_high_fprs_p && next_fpr)
11055 /* If the stack might be accessed through a different register
11056 we have to make sure that the stack pointer decrement is not
11057 moved below the use of the stack slots. */
11058 s390_emit_stack_tie ();
11060 insn = emit_insn (gen_add2_insn (temp_reg,
11061 GEN_INT (cfun_frame_layout.f8_offset)));
11063 offset = 0;
11065 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11066 if (cfun_fpr_save_p (i))
11068 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11069 cfun_frame_layout.frame_size
11070 + cfun_frame_layout.f8_offset
11071 + offset);
11073 insn = save_fpr (temp_reg, offset, i);
11074 offset += 8;
11075 RTX_FRAME_RELATED_P (insn) = 1;
11076 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11077 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11078 gen_rtx_REG (DFmode, i)));
11082 /* Set frame pointer, if needed. */
11084 if (frame_pointer_needed)
11086 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11087 RTX_FRAME_RELATED_P (insn) = 1;
11090 /* Set up got pointer, if needed. */
11092 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11094 rtx_insn *insns = s390_load_got ();
11096 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11097 annotate_constant_pool_refs (&PATTERN (insn));
11099 emit_insn (insns);
11102 if (TARGET_TPF_PROFILING)
11104 /* Generate a BAS instruction to serve as a function
11105 entry intercept to facilitate the use of tracing
11106 algorithms located at the branch target. */
11107 emit_insn (gen_prologue_tpf ());
11109 /* Emit a blockage here so that all code
11110 lies between the profiling mechanisms. */
11111 emit_insn (gen_blockage ());
11115 /* Expand the epilogue into a bunch of separate insns. */
11117 void
11118 s390_emit_epilogue (bool sibcall)
11120 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11121 int area_bottom, area_top, offset = 0;
11122 int next_offset;
11123 rtvec p;
11124 int i;
11126 if (TARGET_TPF_PROFILING)
11129 /* Generate a BAS instruction to serve as a function
11130 entry intercept to facilitate the use of tracing
11131 algorithms located at the branch target. */
11133 /* Emit a blockage here so that all code
11134 lies between the profiling mechanisms. */
11135 emit_insn (gen_blockage ());
11137 emit_insn (gen_epilogue_tpf ());
11140 /* Check whether to use frame or stack pointer for restore. */
11142 frame_pointer = (frame_pointer_needed
11143 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11145 s390_frame_area (&area_bottom, &area_top);
11147 /* Check whether we can access the register save area.
11148 If not, increment the frame pointer as required. */
11150 if (area_top <= area_bottom)
11152 /* Nothing to restore. */
11154 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11155 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11157 /* Area is in range. */
11158 offset = cfun_frame_layout.frame_size;
11160 else
11162 rtx insn, frame_off, cfa;
11164 offset = area_bottom < 0 ? -area_bottom : 0;
11165 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11167 cfa = gen_rtx_SET (frame_pointer,
11168 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11169 if (DISP_IN_RANGE (INTVAL (frame_off)))
11171 insn = gen_rtx_SET (frame_pointer,
11172 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11173 insn = emit_insn (insn);
11175 else
11177 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11178 frame_off = force_const_mem (Pmode, frame_off);
11180 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11181 annotate_constant_pool_refs (&PATTERN (insn));
11183 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11184 RTX_FRAME_RELATED_P (insn) = 1;
11187 /* Restore call saved fprs. */
11189 if (TARGET_64BIT)
11191 if (cfun_save_high_fprs_p)
11193 next_offset = cfun_frame_layout.f8_offset;
11194 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11196 if (cfun_fpr_save_p (i))
11198 restore_fpr (frame_pointer,
11199 offset + next_offset, i);
11200 cfa_restores
11201 = alloc_reg_note (REG_CFA_RESTORE,
11202 gen_rtx_REG (DFmode, i), cfa_restores);
11203 next_offset += 8;
11209 else
11211 next_offset = cfun_frame_layout.f4_offset;
11212 /* f4, f6 */
11213 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11215 if (cfun_fpr_save_p (i))
11217 restore_fpr (frame_pointer,
11218 offset + next_offset, i);
11219 cfa_restores
11220 = alloc_reg_note (REG_CFA_RESTORE,
11221 gen_rtx_REG (DFmode, i), cfa_restores);
11222 next_offset += 8;
11224 else if (!TARGET_PACKED_STACK)
11225 next_offset += 8;
11230 /* Return register. */
11232 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11234 /* Restore call saved gprs. */
11236 if (cfun_frame_layout.first_restore_gpr != -1)
11238 rtx insn, addr;
11239 int i;
11241 /* Check for global registers and save them
11242 to the stack location from where they get restored. */
11244 for (i = cfun_frame_layout.first_restore_gpr;
11245 i <= cfun_frame_layout.last_restore_gpr;
11246 i++)
11248 if (global_not_special_regno_p (i))
11250 addr = plus_constant (Pmode, frame_pointer,
11251 offset + cfun_frame_layout.gprs_offset
11252 + (i - cfun_frame_layout.first_save_gpr_slot)
11253 * UNITS_PER_LONG);
11254 addr = gen_rtx_MEM (Pmode, addr);
11255 set_mem_alias_set (addr, get_frame_alias_set ());
11256 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11258 else
11259 cfa_restores
11260 = alloc_reg_note (REG_CFA_RESTORE,
11261 gen_rtx_REG (Pmode, i), cfa_restores);
11264 if (! sibcall)
11266 /* Fetch return address from stack before load multiple,
11267 this helps scheduling.
11269 Only do this if we already decided that r14 needs to be
11270 saved to a stack slot. (And not just because r14 happens to
11271 be in between two GPRs which need saving.) Otherwise it
11272 would be difficult to take that decision back in
11273 s390_optimize_prologue. */
11274 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
11276 int return_regnum = find_unused_clobbered_reg();
11277 if (!return_regnum)
11278 return_regnum = 4;
11279 return_reg = gen_rtx_REG (Pmode, return_regnum);
11281 addr = plus_constant (Pmode, frame_pointer,
11282 offset + cfun_frame_layout.gprs_offset
11283 + (RETURN_REGNUM
11284 - cfun_frame_layout.first_save_gpr_slot)
11285 * UNITS_PER_LONG);
11286 addr = gen_rtx_MEM (Pmode, addr);
11287 set_mem_alias_set (addr, get_frame_alias_set ());
11288 emit_move_insn (return_reg, addr);
11290 /* Once we have done that optimization we have to make sure
11291 s390_optimize_prologue does not try to remove the
11292 store of r14 since we will not be able to find the
11293 load issued here. */
11294 cfun_frame_layout.save_return_addr_p = true;
11298 insn = restore_gprs (frame_pointer,
11299 offset + cfun_frame_layout.gprs_offset
11300 + (cfun_frame_layout.first_restore_gpr
11301 - cfun_frame_layout.first_save_gpr_slot)
11302 * UNITS_PER_LONG,
11303 cfun_frame_layout.first_restore_gpr,
11304 cfun_frame_layout.last_restore_gpr);
11305 insn = emit_insn (insn);
11306 REG_NOTES (insn) = cfa_restores;
11307 add_reg_note (insn, REG_CFA_DEF_CFA,
11308 plus_constant (Pmode, stack_pointer_rtx,
11309 STACK_POINTER_OFFSET));
11310 RTX_FRAME_RELATED_P (insn) = 1;
11313 s390_restore_gprs_from_fprs ();
11315 if (! sibcall)
11318 /* Return to caller. */
11320 p = rtvec_alloc (2);
11322 RTVEC_ELT (p, 0) = ret_rtx;
11323 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11324 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11328 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11330 static void
11331 s300_set_up_by_prologue (hard_reg_set_container *regs)
11333 if (cfun->machine->base_reg
11334 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11335 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11338 /* -fsplit-stack support. */
11340 /* A SYMBOL_REF for __morestack. */
11341 static GTY(()) rtx morestack_ref;
11343 /* When using -fsplit-stack, the allocation routines set a field in
11344 the TCB to the bottom of the stack plus this much space, measured
11345 in bytes. */
11347 #define SPLIT_STACK_AVAILABLE 1024
11349 /* Emit -fsplit-stack prologue, which goes before the regular function
11350 prologue. */
11352 void
11353 s390_expand_split_stack_prologue (void)
11355 rtx r1, guard, cc = NULL;
11356 rtx_insn *insn;
11357 /* Offset from thread pointer to __private_ss. */
11358 int psso = TARGET_64BIT ? 0x38 : 0x20;
11359 /* Pointer size in bytes. */
11360 /* Frame size and argument size - the two parameters to __morestack. */
11361 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11362 /* Align argument size to 8 bytes - simplifies __morestack code. */
11363 HOST_WIDE_INT args_size = crtl->args.size >= 0
11364 ? ((crtl->args.size + 7) & ~7)
11365 : 0;
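/* For example, an argument area of 13 bytes is rounded up to 16.  */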
11366 /* Label to be called by __morestack. */
11367 rtx_code_label *call_done = NULL;
11368 rtx_code_label *parm_base = NULL;
11369 rtx tmp;
11371 gcc_assert (flag_split_stack && reload_completed);
11372 if (!TARGET_CPU_ZARCH)
11374 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11375 return;
11378 r1 = gen_rtx_REG (Pmode, 1);
11380 /* If no stack frame will be allocated, don't do anything. */
11381 if (!frame_size)
11383 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11385 /* If va_start is used, just use r15. */
11386 emit_move_insn (r1,
11387 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11388 GEN_INT (STACK_POINTER_OFFSET)));
11391 return;
11394 if (morestack_ref == NULL_RTX)
11396 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11397 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11398 | SYMBOL_FLAG_FUNCTION);
11401 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11403 /* If frame_size will fit in an add instruction, do a stack space
11404 check, and only call __morestack if there's not enough space. */
11406 /* Get thread pointer. r1 is the only register we can always destroy - r0
11407 could contain a static chain (and cannot be used to address memory
11408 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11409 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11410 /* Aim at __private_ss. */
11411 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11413 /* If less than 1kiB used, skip addition and compare directly with
11414 __private_ss. */
11415 if (frame_size > SPLIT_STACK_AVAILABLE)
11417 emit_move_insn (r1, guard);
11418 if (TARGET_64BIT)
11419 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11420 else
11421 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11422 guard = r1;
11425 /* Compare the (maybe adjusted) guard with the stack pointer. */
11426 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11429 call_done = gen_label_rtx ();
11430 parm_base = gen_label_rtx ();
11432 /* Emit the parameter block. */
11433 tmp = gen_split_stack_data (parm_base, call_done,
11434 GEN_INT (frame_size),
11435 GEN_INT (args_size));
11436 insn = emit_insn (tmp);
11437 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11438 LABEL_NUSES (call_done)++;
11439 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11440 LABEL_NUSES (parm_base)++;
11442 /* %r1 = litbase. */
11443 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11444 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11445 LABEL_NUSES (parm_base)++;
11447 /* Now, we need to call __morestack. It has very special calling
11448 conventions: it preserves param/return/static chain registers for
11449 calling main function body, and looks for its own parameters at %r1. */
11451 if (cc != NULL)
11453 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11455 insn = emit_jump_insn (tmp);
11456 JUMP_LABEL (insn) = call_done;
11457 LABEL_NUSES (call_done)++;
11459 /* Mark the jump as very unlikely to be taken. */
11460 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11462 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11464 /* If va_start is used, and __morestack was not called, just use
11465 r15. */
11466 emit_move_insn (r1,
11467 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11468 GEN_INT (STACK_POINTER_OFFSET)));
11471 else
11473 tmp = gen_split_stack_call (morestack_ref, call_done);
11474 insn = emit_jump_insn (tmp);
11475 JUMP_LABEL (insn) = call_done;
11476 LABEL_NUSES (call_done)++;
11477 emit_barrier ();
11480 /* __morestack will call us here. */
11482 emit_label (call_done);
11485 /* We may have to tell the dataflow pass that the split stack prologue
11486 is initializing a register. */
11488 static void
11489 s390_live_on_entry (bitmap regs)
11491 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11493 gcc_assert (flag_split_stack);
11494 bitmap_set_bit (regs, 1);
11498 /* Return true if the function can use simple_return to return outside
11499 of a shrink-wrapped region. At present shrink-wrapping is supported
11500 in all cases. */
11502 bool
11503 s390_can_use_simple_return_insn (void)
11505 return true;
11508 /* Return true if the epilogue is guaranteed to contain only a return
11509 instruction and if a direct return can therefore be used instead.
11510 One of the main advantages of using direct return instructions
11511 is that we can then use conditional returns. */
11513 bool
11514 s390_can_use_return_insn (void)
11516 int i;
11518 if (!reload_completed)
11519 return false;
11521 if (crtl->profile)
11522 return false;
11524 if (TARGET_TPF_PROFILING)
11525 return false;
11527 for (i = 0; i < 16; i++)
11528 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11529 return false;
11531 /* For 31 bit this is not covered by the frame_size check below
11532 since f4, f6 are saved in the register save area without needing
11533 additional stack space. */
11534 if (!TARGET_64BIT
11535 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11536 return false;
11538 if (cfun->machine->base_reg
11539 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11540 return false;
11542 return cfun_frame_layout.frame_size == 0;
11545 /* The VX ABI differs for vararg functions. Therefore we need the
11546 prototype of the callee to be available when passing vector type
11547 values. */
11548 static const char *
11549 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11551 return ((TARGET_VX_ABI
11552 && typelist == 0
11553 && VECTOR_TYPE_P (TREE_TYPE (val))
11554 && (funcdecl == NULL_TREE
11555 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11556 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11557 ? N_("vector argument passed to unprototyped function")
11558 : NULL);
11562 /* Return the size in bytes of a function argument of
11563 type TYPE and/or mode MODE. At least one of TYPE or
11564 MODE must be specified. */
11566 static int
11567 s390_function_arg_size (machine_mode mode, const_tree type)
11569 if (type)
11570 return int_size_in_bytes (type);
11572 /* No type info available for some library calls ... */
11573 if (mode != BLKmode)
11574 return GET_MODE_SIZE (mode);
11576 /* If we have neither type nor mode, abort */
11577 gcc_unreachable ();
11580 /* Return true if a function argument of type TYPE and mode MODE
11581 is to be passed in a vector register, if available. */
11583 bool
11584 s390_function_arg_vector (machine_mode mode, const_tree type)
11586 if (!TARGET_VX_ABI)
11587 return false;
11589 if (s390_function_arg_size (mode, type) > 16)
11590 return false;
11592 /* No type info available for some library calls ... */
11593 if (!type)
11594 return VECTOR_MODE_P (mode);
11596 /* The ABI says that record types with a single member are treated
11597 just like that member would be. */
11598 while (TREE_CODE (type) == RECORD_TYPE)
11600 tree field, single = NULL_TREE;
11602 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11604 if (TREE_CODE (field) != FIELD_DECL)
11605 continue;
11607 if (single == NULL_TREE)
11608 single = TREE_TYPE (field);
11609 else
11610 return false;
11613 if (single == NULL_TREE)
11614 return false;
11615 else
11617 /* If the field declaration adds an extra byte due to
11618 e.g. padding, this is not accepted as a vector type. */
11619 if (int_size_in_bytes (single) <= 0
11620 || int_size_in_bytes (single) != int_size_in_bytes (type))
11621 return false;
11622 type = single;
11626 return VECTOR_TYPE_P (type);
11629 /* Return true if a function argument of type TYPE and mode MODE
11630 is to be passed in a floating-point register, if available. */
11632 static bool
11633 s390_function_arg_float (machine_mode mode, const_tree type)
11635 if (s390_function_arg_size (mode, type) > 8)
11636 return false;
11638 /* Soft-float changes the ABI: no floating-point registers are used. */
11639 if (TARGET_SOFT_FLOAT)
11640 return false;
11642 /* No type info available for some library calls ... */
11643 if (!type)
11644 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11646 /* The ABI says that record types with a single member are treated
11647 just like that member would be. */
11648 while (TREE_CODE (type) == RECORD_TYPE)
11650 tree field, single = NULL_TREE;
11652 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11654 if (TREE_CODE (field) != FIELD_DECL)
11655 continue;
11657 if (single == NULL_TREE)
11658 single = TREE_TYPE (field);
11659 else
11660 return false;
11663 if (single == NULL_TREE)
11664 return false;
11665 else
11666 type = single;
11669 return TREE_CODE (type) == REAL_TYPE;
11672 /* Return true if a function argument of type TYPE and mode MODE
11673 is to be passed in an integer register, or a pair of integer
11674 registers, if available. */
11676 static bool
11677 s390_function_arg_integer (machine_mode mode, const_tree type)
11679 int size = s390_function_arg_size (mode, type);
11680 if (size > 8)
11681 return false;
11683 /* No type info available for some library calls ... */
11684 if (!type)
11685 return GET_MODE_CLASS (mode) == MODE_INT
11686 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11688 /* We accept small integral (and similar) types. */
11689 if (INTEGRAL_TYPE_P (type)
11690 || POINTER_TYPE_P (type)
11691 || TREE_CODE (type) == NULLPTR_TYPE
11692 || TREE_CODE (type) == OFFSET_TYPE
11693 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11694 return true;
11696 /* We also accept structs of size 1, 2, 4, 8 that are not
11697 passed in floating-point registers. */
11698 if (AGGREGATE_TYPE_P (type)
11699 && exact_log2 (size) >= 0
11700 && !s390_function_arg_float (mode, type))
11701 return true;
11703 return false;
11706 /* Return 1 if a function argument of type TYPE and mode MODE
11707 is to be passed by reference. The ABI specifies that only
11708 structures of size 1, 2, 4, or 8 bytes are passed by value,
11709 all other structures (and complex numbers) are passed by
11710 reference. */
11712 static bool
11713 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11714 machine_mode mode, const_tree type,
11715 bool named ATTRIBUTE_UNUSED)
11717 int size = s390_function_arg_size (mode, type);
11719 if (s390_function_arg_vector (mode, type))
11720 return false;
11722 if (size > 8)
11723 return true;
11725 if (type)
11727 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11728 return true;
11730 if (TREE_CODE (type) == COMPLEX_TYPE
11731 || TREE_CODE (type) == VECTOR_TYPE)
11732 return true;
11735 return false;
11738 /* Update the data in CUM to advance over an argument of mode MODE and
11739 data type TYPE. (TYPE is null for libcalls where that information
11740 may not be available.). The boolean NAMED specifies whether the
11741 argument is a named argument (as opposed to an unnamed argument
11742 matching an ellipsis). */
11744 static void
11745 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11746 const_tree type, bool named)
11748 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11750 if (s390_function_arg_vector (mode, type))
11752 /* We are called for unnamed vector stdarg arguments which are
11753 passed on the stack. In this case this hook does not have to
11754 do anything since stack arguments are tracked by common
11755 code. */
11756 if (!named)
11757 return;
11758 cum->vrs += 1;
11760 else if (s390_function_arg_float (mode, type))
11762 cum->fprs += 1;
11764 else if (s390_function_arg_integer (mode, type))
11766 int size = s390_function_arg_size (mode, type);
11767 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11769 else
11770 gcc_unreachable ();
11773 /* Define where to put the arguments to a function.
11774 Value is zero to push the argument on the stack,
11775 or a hard register in which to store the argument.
11777 MODE is the argument's machine mode.
11778 TYPE is the data type of the argument (as a tree).
11779 This is null for libcalls where that information may
11780 not be available.
11781 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11782 the preceding args and about the function being called.
11783 NAMED is nonzero if this argument is a named parameter
11784 (otherwise it is an extra parameter matching an ellipsis).
11786 On S/390, we use general purpose registers 2 through 6 to
11787 pass integer, pointer, and certain structure arguments, and
11788 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11789 to pass floating point arguments. All remaining arguments
11790 are pushed to the stack. */
11792 static rtx
11793 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11794 const_tree type, bool named)
11796 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11798 if (!named)
11799 s390_check_type_for_vector_abi (type, true, false);
11801 if (s390_function_arg_vector (mode, type))
11803 /* Vector arguments being part of the ellipsis are passed on the
11804 stack. */
11805 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11806 return NULL_RTX;
11808 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11810 else if (s390_function_arg_float (mode, type))
11812 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11813 return NULL_RTX;
11814 else
11815 return gen_rtx_REG (mode, cum->fprs + 16);
11817 else if (s390_function_arg_integer (mode, type))
11819 int size = s390_function_arg_size (mode, type);
11820 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11822 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11823 return NULL_RTX;
11824 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11825 return gen_rtx_REG (mode, cum->gprs + 2);
11826 else if (n_gprs == 2)
11828 rtvec p = rtvec_alloc (2);
11830 RTVEC_ELT (p, 0)
11831 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11832 const0_rtx);
11833 RTVEC_ELT (p, 1)
11834 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11835 GEN_INT (4));
11837 return gen_rtx_PARALLEL (mode, p);
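/* Illustrative example: with -m31 -mzarch an 8 byte integer argument
   occupies two GPRs and is described by the PARALLEL above as two SImode
   registers at byte offsets 0 and 4.  */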
11841 /* After the real arguments, expand_call calls us once again
11842 with a void_type_node type. Whatever we return here is
11843 passed as operand 2 to the call expanders.
11845 We don't need this feature ... */
11846 else if (type == void_type_node)
11847 return const0_rtx;
11849 gcc_unreachable ();
11852 /* Return true if return values of type TYPE should be returned
11853 in a memory buffer whose address is passed by the caller as
11854 hidden first argument. */
11856 static bool
11857 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11859 /* We accept small integral (and similar) types. */
11860 if (INTEGRAL_TYPE_P (type)
11861 || POINTER_TYPE_P (type)
11862 || TREE_CODE (type) == OFFSET_TYPE
11863 || TREE_CODE (type) == REAL_TYPE)
11864 return int_size_in_bytes (type) > 8;
11866 /* vector types which fit into a VR. */
11867 if (TARGET_VX_ABI
11868 && VECTOR_TYPE_P (type)
11869 && int_size_in_bytes (type) <= 16)
11870 return false;
11872 /* Aggregates and similar constructs are always returned
11873 in memory. */
11874 if (AGGREGATE_TYPE_P (type)
11875 || TREE_CODE (type) == COMPLEX_TYPE
11876 || VECTOR_TYPE_P (type))
11877 return true;
11879 /* ??? We get called on all sorts of random stuff from
11880 aggregate_value_p. We can't abort, but it's not clear
11881 what's safe to return. Pretend it's a struct I guess. */
11882 return true;
11885 /* Function arguments and return values are promoted to word size. */
11887 static machine_mode
11888 s390_promote_function_mode (const_tree type, machine_mode mode,
11889 int *punsignedp,
11890 const_tree fntype ATTRIBUTE_UNUSED,
11891 int for_return ATTRIBUTE_UNUSED)
11893 if (INTEGRAL_MODE_P (mode)
11894 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11896 if (type != NULL_TREE && POINTER_TYPE_P (type))
11897 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11898 return Pmode;
11901 return mode;
11904 /* Define where to return a (scalar) value of type RET_TYPE.
11905 If RET_TYPE is null, define where to return a (scalar)
11906 value of mode MODE from a libcall. */
11908 static rtx
11909 s390_function_and_libcall_value (machine_mode mode,
11910 const_tree ret_type,
11911 const_tree fntype_or_decl,
11912 bool outgoing ATTRIBUTE_UNUSED)
11914 /* For vector return types it is important to use the RET_TYPE
11915 argument whenever available since the middle-end might have
11916 changed the mode to a scalar mode. */
11917 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11918 || (!ret_type && VECTOR_MODE_P (mode)));
11920 /* For normal functions perform the promotion as
11921 promote_function_mode would do. */
11922 if (ret_type)
11924 int unsignedp = TYPE_UNSIGNED (ret_type);
11925 mode = promote_function_mode (ret_type, mode, &unsignedp,
11926 fntype_or_decl, 1);
11929 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11930 || SCALAR_FLOAT_MODE_P (mode)
11931 || (TARGET_VX_ABI && vector_ret_type_p));
11932 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11934 if (TARGET_VX_ABI && vector_ret_type_p)
11935 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11936 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11937 return gen_rtx_REG (mode, 16);
11938 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11939 || UNITS_PER_LONG == UNITS_PER_WORD)
11940 return gen_rtx_REG (mode, 2);
11941 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11943 /* This case is triggered when returning a 64 bit value with
11944 -m31 -mzarch. Although the value would fit into a single
11945 register it has to be forced into a 32 bit register pair in
11946 order to match the ABI. */
11947 rtvec p = rtvec_alloc (2);
11949 RTVEC_ELT (p, 0)
11950 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11951 RTVEC_ELT (p, 1)
11952 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11954 return gen_rtx_PARALLEL (mode, p);
11957 gcc_unreachable ();
11960 /* Define where to return a scalar return value of type RET_TYPE. */
11962 static rtx
11963 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11964 bool outgoing)
11966 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11967 fn_decl_or_type, outgoing);
11970 /* Define where to return a scalar libcall return value of mode
11971 MODE. */
11973 static rtx
11974 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11976 return s390_function_and_libcall_value (mode, NULL_TREE,
11977 NULL_TREE, true);
11981 /* Create and return the va_list datatype.
11983 On S/390, va_list is an array type equivalent to
11985 typedef struct __va_list_tag
11987 long __gpr;
11988 long __fpr;
11989 void *__overflow_arg_area;
11990 void *__reg_save_area;
11991 } va_list[1];
11993 where __gpr and __fpr hold the number of general purpose
11994 or floating point arguments used up to now, respectively,
11995 __overflow_arg_area points to the stack location of the
11996 next argument passed on the stack, and __reg_save_area
11997 always points to the start of the register area in the
11998 call frame of the current function. The function prologue
11999 saves all registers used for argument passing into this
12000 area if the function uses variable arguments. */
12002 static tree
12003 s390_build_builtin_va_list (void)
12005 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12007 record = lang_hooks.types.make_type (RECORD_TYPE);
12009 type_decl =
12010 build_decl (BUILTINS_LOCATION,
12011 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12013 f_gpr = build_decl (BUILTINS_LOCATION,
12014 FIELD_DECL, get_identifier ("__gpr"),
12015 long_integer_type_node);
12016 f_fpr = build_decl (BUILTINS_LOCATION,
12017 FIELD_DECL, get_identifier ("__fpr"),
12018 long_integer_type_node);
12019 f_ovf = build_decl (BUILTINS_LOCATION,
12020 FIELD_DECL, get_identifier ("__overflow_arg_area"),
12021 ptr_type_node);
12022 f_sav = build_decl (BUILTINS_LOCATION,
12023 FIELD_DECL, get_identifier ("__reg_save_area"),
12024 ptr_type_node);
12026 va_list_gpr_counter_field = f_gpr;
12027 va_list_fpr_counter_field = f_fpr;
12029 DECL_FIELD_CONTEXT (f_gpr) = record;
12030 DECL_FIELD_CONTEXT (f_fpr) = record;
12031 DECL_FIELD_CONTEXT (f_ovf) = record;
12032 DECL_FIELD_CONTEXT (f_sav) = record;
12034 TYPE_STUB_DECL (record) = type_decl;
12035 TYPE_NAME (record) = type_decl;
12036 TYPE_FIELDS (record) = f_gpr;
12037 DECL_CHAIN (f_gpr) = f_fpr;
12038 DECL_CHAIN (f_fpr) = f_ovf;
12039 DECL_CHAIN (f_ovf) = f_sav;
12041 layout_type (record);
12043 /* The correct type is an array type of one element. */
12044 return build_array_type (record, build_index_type (size_zero_node));
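/* Illustration only: a minimal variadic function relating the va_list
   fields above to user code.  Each va_arg consults __gpr/__fpr to
   decide whether the next argument still lives in the register save
   area (__reg_save_area) or must be fetched from the stack via
   __overflow_arg_area:

       #include <stdarg.h>

       long
       sum (int n, ...)
       {
         va_list ap;
         long s = 0;
         va_start (ap, n);          // filled in by s390_va_start below
         while (n-- > 0)
           s += va_arg (ap, long);  // expanded by s390_gimplify_va_arg
         va_end (ap);
         return s;
       }
   */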
12047 /* Implement va_start by filling the va_list structure VALIST.
12048 STDARG_P is always true, and ignored.
12049 NEXTARG points to the first anonymous stack argument.
12051 The following global variables are used to initialize
12052 the va_list structure:
12054 crtl->args.info:
12055 holds number of gprs and fprs used for named arguments.
12056 crtl->args.arg_offset_rtx:
12057 holds the offset of the first anonymous stack argument
12058 (relative to the virtual arg pointer). */
12060 static void
12061 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12063 HOST_WIDE_INT n_gpr, n_fpr;
12064 int off;
12065 tree f_gpr, f_fpr, f_ovf, f_sav;
12066 tree gpr, fpr, ovf, sav, t;
12068 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12069 f_fpr = DECL_CHAIN (f_gpr);
12070 f_ovf = DECL_CHAIN (f_fpr);
12071 f_sav = DECL_CHAIN (f_ovf);
12073 valist = build_simple_mem_ref (valist);
12074 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12075 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12076 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12077 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12079 /* Count number of gp and fp argument registers used. */
12081 n_gpr = crtl->args.info.gprs;
12082 n_fpr = crtl->args.info.fprs;
12084 if (cfun->va_list_gpr_size)
12086 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12087 build_int_cst (NULL_TREE, n_gpr));
12088 TREE_SIDE_EFFECTS (t) = 1;
12089 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12092 if (cfun->va_list_fpr_size)
12094 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12095 build_int_cst (NULL_TREE, n_fpr));
12096 TREE_SIDE_EFFECTS (t) = 1;
12097 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12100 if (flag_split_stack
12101 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12102 == NULL)
12103 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12105 rtx reg;
12106 rtx_insn *seq;
12108 reg = gen_reg_rtx (Pmode);
12109 cfun->machine->split_stack_varargs_pointer = reg;
12111 start_sequence ();
12112 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12113 seq = get_insns ();
12114 end_sequence ();
12116 push_topmost_sequence ();
12117 emit_insn_after (seq, entry_of_function ());
12118 pop_topmost_sequence ();
12121 /* Find the overflow area.
12122 FIXME: This currently is too pessimistic when the vector ABI is
12123 enabled. In that case we *always* set up the overflow area
12124 pointer. */
12125 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12126 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12127 || TARGET_VX_ABI)
12129 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12130 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12131 else
12132 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12134 off = INTVAL (crtl->args.arg_offset_rtx);
12135 off = off < 0 ? 0 : off;
12136 if (TARGET_DEBUG_ARG)
12137 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12138 (int)n_gpr, (int)n_fpr, off);
12140 t = fold_build_pointer_plus_hwi (t, off);
12142 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12143 TREE_SIDE_EFFECTS (t) = 1;
12144 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12147 /* Find the register save area. */
12148 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12149 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12151 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12152 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12154 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12155 TREE_SIDE_EFFECTS (t) = 1;
12156 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12160 /* Implement va_arg by updating the va_list structure
12161 VALIST as required to retrieve an argument of type
12162 TYPE, and returning that argument.
12164 Generates code equivalent to:
12166 if (integral value) {
12167 if (size <= 4 && args.gpr < 5 ||
12168 size > 4 && args.gpr < 4 )
12169 ret = args.reg_save_area[args.gpr+8]
12170 else
12171 ret = *args.overflow_arg_area++;
12172 } else if (vector value) {
12173 ret = *args.overflow_arg_area;
12174 args.overflow_arg_area += size / 8;
12175 } else if (float value) {
12176 if (args.fpr < 2)
12177 ret = args.reg_save_area[args.fpr+64]
12178 else
12179 ret = *args.overflow_arg_area++;
12180 } else if (aggregate value) {
12181 if (args.gpr < 5)
12182 ret = *args.reg_save_area[args.gpr]
12183 else
12184 ret = **args.overflow_arg_area++;
12185 } */
12187 static tree
12188 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12189 gimple_seq *post_p ATTRIBUTE_UNUSED)
12191 tree f_gpr, f_fpr, f_ovf, f_sav;
12192 tree gpr, fpr, ovf, sav, reg, t, u;
12193 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12194 tree lab_false, lab_over = NULL_TREE;
12195 tree addr = create_tmp_var (ptr_type_node, "addr");
12196 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12197 a stack slot. */
12199 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12200 f_fpr = DECL_CHAIN (f_gpr);
12201 f_ovf = DECL_CHAIN (f_fpr);
12202 f_sav = DECL_CHAIN (f_ovf);
12204 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12205 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12206 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12208 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12209 both appear on a lhs. */
12210 valist = unshare_expr (valist);
12211 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12213 size = int_size_in_bytes (type);
12215 s390_check_type_for_vector_abi (type, true, false);
12217 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12219 if (TARGET_DEBUG_ARG)
12221 fprintf (stderr, "va_arg: aggregate type");
12222 debug_tree (type);
12225 /* Aggregates are passed by reference. */
12226 indirect_p = 1;
12227 reg = gpr;
12228 n_reg = 1;
12230 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12231 will be added by s390_frame_info because for va_args an even number
12232 of GPRs always has to be saved (r2-r15 = 14 regs).
12233 sav_ofs = 2 * UNITS_PER_LONG;
12234 sav_scale = UNITS_PER_LONG;
12235 size = UNITS_PER_LONG;
12236 max_reg = GP_ARG_NUM_REG - n_reg;
12237 left_align_p = false;
12239 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12241 if (TARGET_DEBUG_ARG)
12243 fprintf (stderr, "va_arg: vector type");
12244 debug_tree (type);
12247 indirect_p = 0;
12248 reg = NULL_TREE;
12249 n_reg = 0;
12250 sav_ofs = 0;
12251 sav_scale = 8;
12252 max_reg = 0;
12253 left_align_p = true;
12255 else if (s390_function_arg_float (TYPE_MODE (type), type))
12257 if (TARGET_DEBUG_ARG)
12259 fprintf (stderr, "va_arg: float type");
12260 debug_tree (type);
12263 /* FP args go in FP registers, if present. */
12264 indirect_p = 0;
12265 reg = fpr;
12266 n_reg = 1;
12267 sav_ofs = 16 * UNITS_PER_LONG;
12268 sav_scale = 8;
12269 max_reg = FP_ARG_NUM_REG - n_reg;
12270 left_align_p = false;
12272 else
12274 if (TARGET_DEBUG_ARG)
12276 fprintf (stderr, "va_arg: other type");
12277 debug_tree (type);
12280 /* Otherwise into GP registers. */
12281 indirect_p = 0;
12282 reg = gpr;
12283 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12285 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12286 will be added by s390_frame_info because for va_args an even number
12287 of GPRs always has to be saved (r2-r15 = 14 regs).
12288 sav_ofs = 2 * UNITS_PER_LONG;
12290 if (size < UNITS_PER_LONG)
12291 sav_ofs += UNITS_PER_LONG - size;
12293 sav_scale = UNITS_PER_LONG;
12294 max_reg = GP_ARG_NUM_REG - n_reg;
12295 left_align_p = false;
12298 /* Pull the value out of the saved registers ... */
12300 if (reg != NULL_TREE)
12303 /* if (reg > ((typeof (reg))max_reg))
12304 goto lab_false;
12306 addr = sav + sav_ofs + reg * sav_scale;
12308 goto lab_over;
12310 lab_false: */
12313 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12314 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12316 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12317 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12318 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12319 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12320 gimplify_and_add (t, pre_p);
12322 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12323 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12324 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12325 t = fold_build_pointer_plus (t, u);
12327 gimplify_assign (addr, t, pre_p);
12329 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12331 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12334 /* ... Otherwise out of the overflow area. */
12336 t = ovf;
12337 if (size < UNITS_PER_LONG && !left_align_p)
12338 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12340 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12342 gimplify_assign (addr, t, pre_p);
12344 if (size < UNITS_PER_LONG && left_align_p)
12345 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12346 else
12347 t = fold_build_pointer_plus_hwi (t, size);
12349 gimplify_assign (ovf, t, pre_p);
12351 if (reg != NULL_TREE)
12352 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12355 /* Increment register save count. */
12357 if (n_reg > 0)
12359 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12360 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12361 gimplify_and_add (u, pre_p);
12364 if (indirect_p)
12366 t = build_pointer_type_for_mode (build_pointer_type (type),
12367 ptr_mode, true);
12368 addr = fold_convert (t, addr);
12369 addr = build_va_arg_indirect_ref (addr);
12371 else
12373 t = build_pointer_type_for_mode (type, ptr_mode, true);
12374 addr = fold_convert (t, addr);
12377 return build_va_arg_indirect_ref (addr);
12380 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12381 expanders.
12382 DEST - Register location where CC will be stored.
12383 TDB - Pointer to a 256 byte area where to store the transaction
12384 diagnostic block. NULL if TDB is not needed.
12385 RETRY - Retry count value. If non-NULL, a retry loop for CC2
12386 is emitted.
12387 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12388 of the tbegin instruction pattern. */
12390 void
12391 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12393 rtx retry_plus_two = gen_reg_rtx (SImode);
12394 rtx retry_reg = gen_reg_rtx (SImode);
12395 rtx_code_label *retry_label = NULL;
12397 if (retry != NULL_RTX)
12399 emit_move_insn (retry_reg, retry);
12400 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12401 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12402 retry_label = gen_label_rtx ();
12403 emit_label (retry_label);
12406 if (clobber_fprs_p)
12408 if (TARGET_VX)
12409 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12410 tdb));
12411 else
12412 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12413 tdb));
12415 else
12416 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12417 tdb));
12419 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12420 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12421 CC_REGNUM)),
12422 UNSPEC_CC_TO_INT));
12423 if (retry != NULL_RTX)
12425 const int CC0 = 1 << 3;
12426 const int CC1 = 1 << 2;
12427 const int CC3 = 1 << 0;
12428 rtx jump;
12429 rtx count = gen_reg_rtx (SImode);
12430 rtx_code_label *leave_label = gen_label_rtx ();
12432 /* Exit for success and permanent failures. */
12433 jump = s390_emit_jump (leave_label,
12434 gen_rtx_EQ (VOIDmode,
12435 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12436 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12437 LABEL_NUSES (leave_label) = 1;
12439 /* CC2 - transient failure. Perform retry with ppa. */
12440 emit_move_insn (count, retry_plus_two);
12441 emit_insn (gen_subsi3 (count, count, retry_reg));
12442 emit_insn (gen_tx_assist (count));
12443 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12444 retry_reg,
12445 retry_reg));
12446 JUMP_LABEL (jump) = retry_label;
12447 LABEL_NUSES (retry_label) = 1;
12448 emit_label (leave_label);
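/* Illustration only (assuming the s390 HTM builtins documented for
   -mhtm): the expander above implements __builtin_tbegin and
   __builtin_tbegin_retry, e.g.

       #include <htmintrin.h>

       static int counter;

       void
       bump (void)
       {
         // Retry up to 5 times on transient failures (CC2); the retry
         // loop emitted above uses PPA to tell the CPU how often the
         // transaction has already failed.
         if (__builtin_tbegin_retry ((void *) 0, 5) == _HTM_TBEGIN_STARTED)
           {
             counter++;
             __builtin_tend ();
           }
         else
           {
             // fallback path, e.g. take a lock
           }
       }
   */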
12453 /* Return the decl for the target specific builtin with the function
12454 code FCODE. */
12456 static tree
12457 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12459 if (fcode >= S390_BUILTIN_MAX)
12460 return error_mark_node;
12462 return s390_builtin_decls[fcode];
12465 /* We call mcount before the function prologue. So a profiled leaf
12466 function should stay a leaf function. */
12468 static bool
12469 s390_keep_leaf_when_profiled ()
12471 return true;
12474 /* Output assembly code for the trampoline template to
12475 stdio stream FILE.
12477 On S/390, we use gpr 1 internally in the trampoline code;
12478 gpr 0 is used to hold the static chain. */
12480 static void
12481 s390_asm_trampoline_template (FILE *file)
12483 rtx op[2];
12484 op[0] = gen_rtx_REG (Pmode, 0);
12485 op[1] = gen_rtx_REG (Pmode, 1);
12487 if (TARGET_64BIT)
12489 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12490 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12491 output_asm_insn ("br\t%1", op); /* 2 byte */
12492 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12494 else
12496 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12497 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12498 output_asm_insn ("br\t%1", op); /* 2 byte */
12499 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
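/* Illustration only: trampolines are needed for GNU C nested functions
   whose address escapes, as in this hypothetical example:

       int
       apply (int (*fn) (int), int x)
       {
         return fn (x);
       }

       int
       outer (int bias)
       {
         int add_bias (int v) { return v + bias; }  // needs static chain
         return apply (add_bias, 1);                // address escapes
       }

   The template above is copied onto the stack and patched by
   s390_trampoline_init below with the static chain value (gpr 0) and
   the entry point of add_bias.  */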
12503 /* Emit RTL insns to initialize the variable parts of a trampoline.
12504 FNADDR is an RTX for the address of the function's pure code.
12505 CXT is an RTX for the static chain value for the function. */
12507 static void
12508 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12510 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12511 rtx mem;
12513 emit_block_move (m_tramp, assemble_trampoline_template (),
12514 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12516 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12517 emit_move_insn (mem, cxt);
12518 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12519 emit_move_insn (mem, fnaddr);
12522 /* Output assembler code to FILE to increment profiler label # LABELNO
12523 for profiling a function entry. */
12525 void
12526 s390_function_profiler (FILE *file, int labelno)
12528 rtx op[7];
12530 char label[128];
12531 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12533 fprintf (file, "# function profiler \n");
12535 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12536 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12537 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12539 op[2] = gen_rtx_REG (Pmode, 1);
12540 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12541 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12543 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12544 if (flag_pic)
12546 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12547 op[4] = gen_rtx_CONST (Pmode, op[4]);
12550 if (TARGET_64BIT)
12552 output_asm_insn ("stg\t%0,%1", op);
12553 output_asm_insn ("larl\t%2,%3", op);
12554 output_asm_insn ("brasl\t%0,%4", op);
12555 output_asm_insn ("lg\t%0,%1", op);
12557 else if (TARGET_CPU_ZARCH)
12559 output_asm_insn ("st\t%0,%1", op);
12560 output_asm_insn ("larl\t%2,%3", op);
12561 output_asm_insn ("brasl\t%0,%4", op);
12562 output_asm_insn ("l\t%0,%1", op);
12564 else if (!flag_pic)
12566 op[6] = gen_label_rtx ();
12568 output_asm_insn ("st\t%0,%1", op);
12569 output_asm_insn ("bras\t%2,%l6", op);
12570 output_asm_insn (".long\t%4", op);
12571 output_asm_insn (".long\t%3", op);
12572 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12573 output_asm_insn ("l\t%0,0(%2)", op);
12574 output_asm_insn ("l\t%2,4(%2)", op);
12575 output_asm_insn ("basr\t%0,%0", op);
12576 output_asm_insn ("l\t%0,%1", op);
12578 else
12580 op[5] = gen_label_rtx ();
12581 op[6] = gen_label_rtx ();
12583 output_asm_insn ("st\t%0,%1", op);
12584 output_asm_insn ("bras\t%2,%l6", op);
12585 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12586 output_asm_insn (".long\t%4-%l5", op);
12587 output_asm_insn (".long\t%3-%l5", op);
12588 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12589 output_asm_insn ("lr\t%0,%2", op);
12590 output_asm_insn ("a\t%0,0(%2)", op);
12591 output_asm_insn ("a\t%2,4(%2)", op);
12592 output_asm_insn ("basr\t%0,%0", op);
12593 output_asm_insn ("l\t%0,%1", op);
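/* Illustration only: the sequences above are emitted at every function
   entry when compiling with -pg (hypothetical invocation):

       gcc -S -pg foo.c

   They save the return address into the stack slot %1, load the address
   of the per-function label %3 into %r1 and call _mcount (through the
   PLT for PIC code) before the function body runs, then restore the
   return address.  */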
12597 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12598 into its SYMBOL_REF_FLAGS. */
12600 static void
12601 s390_encode_section_info (tree decl, rtx rtl, int first)
12603 default_encode_section_info (decl, rtl, first);
12605 if (TREE_CODE (decl) == VAR_DECL)
12607 /* Store the alignment to be able to check if we can use
12608 a larl/load-relative instruction. We only handle the cases
12609 that can go wrong (i.e. no FUNC_DECLs). */
12610 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12611 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12612 else if (DECL_ALIGN (decl) % 32)
12613 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12614 else if (DECL_ALIGN (decl) % 64)
12615 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12618 /* Literal pool references don't have a decl so they are handled
12619 differently here. We rely on the information in the MEM_ALIGN
12620 entry to decide upon the alignment. */
12621 if (MEM_P (rtl)
12622 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12623 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12625 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12626 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12627 else if (MEM_ALIGN (rtl) % 32)
12628 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12629 else if (MEM_ALIGN (rtl) % 64)
12630 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
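/* Illustration only: the NOTALIGN* flags above record that a symbol
   must not be assumed to be 2/4/8 byte aligned, so that PC-relative
   instructions such as LARL (which can only form addresses of 2-byte
   aligned objects) are not used for it.  A hypothetical declaration
   that may end up with SYMBOL_FLAG_NOTALIGN2 set:

       extern char odd_byte;   // only 1-byte alignment can be assumed

   Such symbols get their address via the literal pool or the GOT
   instead of a load-relative instruction.  */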
12634 /* Output thunk to FILE that implements a C++ virtual function call (with
12635 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12636 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12637 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12638 relative to the resulting this pointer. */
12640 static void
12641 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12642 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12643 tree function)
12645 rtx op[10];
12646 int nonlocal = 0;
12648 /* Make sure unwind info is emitted for the thunk if needed. */
12649 final_start_function (emit_barrier (), file, 1);
12651 /* Operand 0 is the target function. */
12652 op[0] = XEXP (DECL_RTL (function), 0);
12653 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12655 nonlocal = 1;
12656 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12657 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12658 op[0] = gen_rtx_CONST (Pmode, op[0]);
12661 /* Operand 1 is the 'this' pointer. */
12662 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12663 op[1] = gen_rtx_REG (Pmode, 3);
12664 else
12665 op[1] = gen_rtx_REG (Pmode, 2);
12667 /* Operand 2 is the delta. */
12668 op[2] = GEN_INT (delta);
12670 /* Operand 3 is the vcall_offset. */
12671 op[3] = GEN_INT (vcall_offset);
12673 /* Operand 4 is the temporary register. */
12674 op[4] = gen_rtx_REG (Pmode, 1);
12676 /* Operands 5 to 8 can be used as labels. */
12677 op[5] = NULL_RTX;
12678 op[6] = NULL_RTX;
12679 op[7] = NULL_RTX;
12680 op[8] = NULL_RTX;
12682 /* Operand 9 can be used for temporary register. */
12683 op[9] = NULL_RTX;
12685 /* Generate code. */
12686 if (TARGET_64BIT)
12688 /* Setup literal pool pointer if required. */
12689 if ((!DISP_IN_RANGE (delta)
12690 && !CONST_OK_FOR_K (delta)
12691 && !CONST_OK_FOR_Os (delta))
12692 || (!DISP_IN_RANGE (vcall_offset)
12693 && !CONST_OK_FOR_K (vcall_offset)
12694 && !CONST_OK_FOR_Os (vcall_offset)))
12696 op[5] = gen_label_rtx ();
12697 output_asm_insn ("larl\t%4,%5", op);
12700 /* Add DELTA to this pointer. */
12701 if (delta)
12703 if (CONST_OK_FOR_J (delta))
12704 output_asm_insn ("la\t%1,%2(%1)", op);
12705 else if (DISP_IN_RANGE (delta))
12706 output_asm_insn ("lay\t%1,%2(%1)", op);
12707 else if (CONST_OK_FOR_K (delta))
12708 output_asm_insn ("aghi\t%1,%2", op);
12709 else if (CONST_OK_FOR_Os (delta))
12710 output_asm_insn ("agfi\t%1,%2", op);
12711 else
12713 op[6] = gen_label_rtx ();
12714 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12718 /* Perform vcall adjustment. */
12719 if (vcall_offset)
12721 if (DISP_IN_RANGE (vcall_offset))
12723 output_asm_insn ("lg\t%4,0(%1)", op);
12724 output_asm_insn ("ag\t%1,%3(%4)", op);
12726 else if (CONST_OK_FOR_K (vcall_offset))
12728 output_asm_insn ("lghi\t%4,%3", op);
12729 output_asm_insn ("ag\t%4,0(%1)", op);
12730 output_asm_insn ("ag\t%1,0(%4)", op);
12732 else if (CONST_OK_FOR_Os (vcall_offset))
12734 output_asm_insn ("lgfi\t%4,%3", op);
12735 output_asm_insn ("ag\t%4,0(%1)", op);
12736 output_asm_insn ("ag\t%1,0(%4)", op);
12738 else
12740 op[7] = gen_label_rtx ();
12741 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12742 output_asm_insn ("ag\t%4,0(%1)", op);
12743 output_asm_insn ("ag\t%1,0(%4)", op);
12747 /* Jump to target. */
12748 output_asm_insn ("jg\t%0", op);
12750 /* Output literal pool if required. */
12751 if (op[5])
12753 output_asm_insn (".align\t4", op);
12754 targetm.asm_out.internal_label (file, "L",
12755 CODE_LABEL_NUMBER (op[5]));
12757 if (op[6])
12759 targetm.asm_out.internal_label (file, "L",
12760 CODE_LABEL_NUMBER (op[6]));
12761 output_asm_insn (".long\t%2", op);
12763 if (op[7])
12765 targetm.asm_out.internal_label (file, "L",
12766 CODE_LABEL_NUMBER (op[7]));
12767 output_asm_insn (".long\t%3", op);
12770 else
12772 /* Setup base pointer if required. */
12773 if (!vcall_offset
12774 || (!DISP_IN_RANGE (delta)
12775 && !CONST_OK_FOR_K (delta)
12776 && !CONST_OK_FOR_Os (delta))
12777 || (!DISP_IN_RANGE (vcall_offset)
12778 && !CONST_OK_FOR_K (vcall_offset)
12779 && !CONST_OK_FOR_Os (vcall_offset)))
12781 op[5] = gen_label_rtx ();
12782 output_asm_insn ("basr\t%4,0", op);
12783 targetm.asm_out.internal_label (file, "L",
12784 CODE_LABEL_NUMBER (op[5]));
12787 /* Add DELTA to this pointer. */
12788 if (delta)
12790 if (CONST_OK_FOR_J (delta))
12791 output_asm_insn ("la\t%1,%2(%1)", op);
12792 else if (DISP_IN_RANGE (delta))
12793 output_asm_insn ("lay\t%1,%2(%1)", op);
12794 else if (CONST_OK_FOR_K (delta))
12795 output_asm_insn ("ahi\t%1,%2", op);
12796 else if (CONST_OK_FOR_Os (delta))
12797 output_asm_insn ("afi\t%1,%2", op);
12798 else
12800 op[6] = gen_label_rtx ();
12801 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12805 /* Perform vcall adjustment. */
12806 if (vcall_offset)
12808 if (CONST_OK_FOR_J (vcall_offset))
12810 output_asm_insn ("l\t%4,0(%1)", op);
12811 output_asm_insn ("a\t%1,%3(%4)", op);
12813 else if (DISP_IN_RANGE (vcall_offset))
12815 output_asm_insn ("l\t%4,0(%1)", op);
12816 output_asm_insn ("ay\t%1,%3(%4)", op);
12818 else if (CONST_OK_FOR_K (vcall_offset))
12820 output_asm_insn ("lhi\t%4,%3", op);
12821 output_asm_insn ("a\t%4,0(%1)", op);
12822 output_asm_insn ("a\t%1,0(%4)", op);
12824 else if (CONST_OK_FOR_Os (vcall_offset))
12826 output_asm_insn ("iilf\t%4,%3", op);
12827 output_asm_insn ("a\t%4,0(%1)", op);
12828 output_asm_insn ("a\t%1,0(%4)", op);
12830 else
12832 op[7] = gen_label_rtx ();
12833 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12834 output_asm_insn ("a\t%4,0(%1)", op);
12835 output_asm_insn ("a\t%1,0(%4)", op);
12838 /* We had to clobber the base pointer register.
12839 Re-setup the base pointer (with a different base). */
12840 op[5] = gen_label_rtx ();
12841 output_asm_insn ("basr\t%4,0", op);
12842 targetm.asm_out.internal_label (file, "L",
12843 CODE_LABEL_NUMBER (op[5]));
12846 /* Jump to target. */
12847 op[8] = gen_label_rtx ();
12849 if (!flag_pic)
12850 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12851 else if (!nonlocal)
12852 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12853 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12854 else if (flag_pic == 1)
12856 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12857 output_asm_insn ("l\t%4,%0(%4)", op);
12859 else if (flag_pic == 2)
12861 op[9] = gen_rtx_REG (Pmode, 0);
12862 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12863 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12864 output_asm_insn ("ar\t%4,%9", op);
12865 output_asm_insn ("l\t%4,0(%4)", op);
12868 output_asm_insn ("br\t%4", op);
12870 /* Output literal pool. */
12871 output_asm_insn (".align\t4", op);
12873 if (nonlocal && flag_pic == 2)
12874 output_asm_insn (".long\t%0", op);
12875 if (nonlocal)
12877 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12878 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12881 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12882 if (!flag_pic)
12883 output_asm_insn (".long\t%0", op);
12884 else
12885 output_asm_insn (".long\t%0-%5", op);
12887 if (op[6])
12889 targetm.asm_out.internal_label (file, "L",
12890 CODE_LABEL_NUMBER (op[6]));
12891 output_asm_insn (".long\t%2", op);
12893 if (op[7])
12895 targetm.asm_out.internal_label (file, "L",
12896 CODE_LABEL_NUMBER (op[7]));
12897 output_asm_insn (".long\t%3", op);
12900 final_end_function ();
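/* Illustration only: a thunk as emitted above typically results from
   C++ multiple inheritance, e.g.

       struct A { virtual void f (); };
       struct B { virtual void g (); };
       struct C : A, B { void g (); };

   Calling g() through a B* that really points to a C goes through a
   thunk which adjusts the incoming this pointer by DELTA (the offset of
   the B subobject within C) and then jumps to C::g.  A non-zero
   VCALL_OFFSET additionally indirects through the vtable, as handled
   above.  */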
12903 static bool
12904 s390_valid_pointer_mode (machine_mode mode)
12906 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12909 /* Checks whether the given CALL_EXPR would use a caller
12910 saved register. This is used to decide whether sibling call
12911 optimization could be performed on the respective function
12912 call. */
12914 static bool
12915 s390_call_saved_register_used (tree call_expr)
12917 CUMULATIVE_ARGS cum_v;
12918 cumulative_args_t cum;
12919 tree parameter;
12920 machine_mode mode;
12921 tree type;
12922 rtx parm_rtx;
12923 int reg, i;
12925 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12926 cum = pack_cumulative_args (&cum_v);
12928 for (i = 0; i < call_expr_nargs (call_expr); i++)
12930 parameter = CALL_EXPR_ARG (call_expr, i);
12931 gcc_assert (parameter);
12933 /* For an undeclared variable passed as parameter we will get
12934 an ERROR_MARK node here. */
12935 if (TREE_CODE (parameter) == ERROR_MARK)
12936 return true;
12938 type = TREE_TYPE (parameter);
12939 gcc_assert (type);
12941 mode = TYPE_MODE (type);
12942 gcc_assert (mode);
12944 /* We assume that in the target function all parameters are
12945 named. This only has an impact on vector argument register
12946 usage, and none of the vector argument registers are call-saved.
12947 if (pass_by_reference (&cum_v, mode, type, true))
12949 mode = Pmode;
12950 type = build_pointer_type (type);
12953 parm_rtx = s390_function_arg (cum, mode, type, true);
12955 s390_function_arg_advance (cum, mode, type, true);
12957 if (!parm_rtx)
12958 continue;
12960 if (REG_P (parm_rtx))
12962 for (reg = 0;
12963 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12964 reg++)
12965 if (!call_used_regs[reg + REGNO (parm_rtx)])
12966 return true;
12969 if (GET_CODE (parm_rtx) == PARALLEL)
12971 int i;
12973 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12975 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12977 gcc_assert (REG_P (r));
12979 for (reg = 0;
12980 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12981 reg++)
12982 if (!call_used_regs[reg + REGNO (r)])
12983 return true;
12988 return false;
12991 /* Return true if the given call expression can be
12992 turned into a sibling call.
12993 DECL holds the declaration of the function to be called whereas
12994 EXP is the call expression itself. */
12996 static bool
12997 s390_function_ok_for_sibcall (tree decl, tree exp)
12999 /* The TPF epilogue uses register 1. */
13000 if (TARGET_TPF_PROFILING)
13001 return false;
13003 /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
13004 which would have to be restored before the sibcall. */
13005 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
13006 return false;
13008 /* Register 6 on s390 is available as an argument register but is
13009 unfortunately call-saved. This makes functions that need this register
13010 for argument passing not suitable for sibcalls.
13011 return !s390_call_saved_register_used (exp);
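/* Illustration only (assuming the standard s390 ELF ABI where %r2-%r6
   carry the first five integer/pointer arguments and %r6 is call-saved):
   the hypothetical call below cannot become a sibcall because its fifth
   argument would be passed in the call-saved %r6:

       long callee (long, long, long, long, long);

       long
       caller (long a, long b, long c, long d, long e)
       {
         return callee (a, b, c, d, e + 1);   // tail position, no sibcall
       }
   */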
13014 /* Return the fixed registers used for condition codes. */
13016 static bool
13017 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13019 *p1 = CC_REGNUM;
13020 *p2 = INVALID_REGNUM;
13022 return true;
13025 /* This function is used by the call expanders of the machine description.
13026 It emits the call insn itself together with the necessary operations
13027 to adjust the target address and returns the emitted insn.
13028 ADDR_LOCATION is the target address rtx
13029 TLS_CALL the location of the thread-local symbol
13030 RESULT_REG the register where the result of the call should be stored
13031 RETADDR_REG the register where the return address should be stored
13032 If this parameter is NULL_RTX the call is considered
13033 to be a sibling call. */
13035 rtx_insn *
13036 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13037 rtx retaddr_reg)
13039 bool plt_call = false;
13040 rtx_insn *insn;
13041 rtx call;
13042 rtx clobber;
13043 rtvec vec;
13045 /* Direct function calls need special treatment. */
13046 if (GET_CODE (addr_location) == SYMBOL_REF)
13048 /* When calling a global routine in PIC mode, we must
13049 replace the symbol itself with the PLT stub. */
13050 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13052 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13054 addr_location = gen_rtx_UNSPEC (Pmode,
13055 gen_rtvec (1, addr_location),
13056 UNSPEC_PLT);
13057 addr_location = gen_rtx_CONST (Pmode, addr_location);
13058 plt_call = true;
13060 else
13061 /* For -fpic code the PLT entries might use r12 which is
13062 call-saved. Therefore we cannot do a sibcall when
13063 calling directly using a symbol ref. When reaching
13064 this point we decided (in s390_function_ok_for_sibcall)
13065 to do a sibcall for a function pointer but one of the
13066 optimizers was able to get rid of the function pointer
13067 by propagating the symbol ref into the call. This
13068 optimization is illegal for S/390 so we turn the direct
13069 call into an indirect call again. */
13070 addr_location = force_reg (Pmode, addr_location);
13073 /* Unless we can use the bras(l) insn, force the
13074 routine address into a register. */
13075 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13077 if (flag_pic)
13078 addr_location = legitimize_pic_address (addr_location, 0);
13079 else
13080 addr_location = force_reg (Pmode, addr_location);
13084 /* If it is already an indirect call or the code above moved the
13085 SYMBOL_REF to somewhere else make sure the address can be found in
13086 register 1. */
13087 if (retaddr_reg == NULL_RTX
13088 && GET_CODE (addr_location) != SYMBOL_REF
13089 && !plt_call)
13091 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13092 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13095 addr_location = gen_rtx_MEM (QImode, addr_location);
13096 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13098 if (result_reg != NULL_RTX)
13099 call = gen_rtx_SET (result_reg, call);
13101 if (retaddr_reg != NULL_RTX)
13103 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13105 if (tls_call != NULL_RTX)
13106 vec = gen_rtvec (3, call, clobber,
13107 gen_rtx_USE (VOIDmode, tls_call));
13108 else
13109 vec = gen_rtvec (2, call, clobber);
13111 call = gen_rtx_PARALLEL (VOIDmode, vec);
13114 insn = emit_call_insn (call);
13116 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13117 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13119 /* s390_function_ok_for_sibcall should
13120 have denied sibcalls in this case. */
13121 gcc_assert (retaddr_reg != NULL_RTX);
13122 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13124 return insn;
13127 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13129 static void
13130 s390_conditional_register_usage (void)
13132 int i;
13134 if (flag_pic)
13136 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13137 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13139 if (TARGET_CPU_ZARCH)
13141 fixed_regs[BASE_REGNUM] = 0;
13142 call_used_regs[BASE_REGNUM] = 0;
13143 fixed_regs[RETURN_REGNUM] = 0;
13144 call_used_regs[RETURN_REGNUM] = 0;
13146 if (TARGET_64BIT)
13148 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13149 call_used_regs[i] = call_really_used_regs[i] = 0;
13151 else
13153 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13154 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13157 if (TARGET_SOFT_FLOAT)
13159 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13160 call_used_regs[i] = fixed_regs[i] = 1;
13163 /* Disable v16 - v31 for non-vector target. */
13164 if (!TARGET_VX)
13166 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13167 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13171 /* Corresponding function to eh_return expander. */
13173 static GTY(()) rtx s390_tpf_eh_return_symbol;
13174 void
13175 s390_emit_tpf_eh_return (rtx target)
13177 rtx_insn *insn;
13178 rtx reg, orig_ra;
13180 if (!s390_tpf_eh_return_symbol)
13181 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13183 reg = gen_rtx_REG (Pmode, 2);
13184 orig_ra = gen_rtx_REG (Pmode, 3);
13186 emit_move_insn (reg, target);
13187 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13188 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13189 gen_rtx_REG (Pmode, RETURN_REGNUM));
13190 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13191 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13193 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13196 /* Rework the prologue/epilogue to avoid saving/restoring
13197 registers unnecessarily. */
13199 static void
13200 s390_optimize_prologue (void)
13202 rtx_insn *insn, *new_insn, *next_insn;
13204 /* Do a final recompute of the frame-related data. */
13205 s390_optimize_register_info ();
13207 /* If all special registers are in fact used, there's nothing we
13208 can do, so no point in walking the insn list. */
13210 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13211 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13212 && (TARGET_CPU_ZARCH
13213 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13214 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13215 return;
13217 /* Search for prologue/epilogue insns and replace them. */
13219 for (insn = get_insns (); insn; insn = next_insn)
13221 int first, last, off;
13222 rtx set, base, offset;
13223 rtx pat;
13225 next_insn = NEXT_INSN (insn);
13227 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13228 continue;
13230 pat = PATTERN (insn);
13232 /* Remove ldgr/lgdr instructions used for saving and restore
13233 GPRs if possible. */
13234 if (TARGET_Z10)
13236 rtx tmp_pat = pat;
13238 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13239 tmp_pat = XVECEXP (pat, 0, 0);
13241 if (GET_CODE (tmp_pat) == SET
13242 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13243 && REG_P (SET_SRC (tmp_pat))
13244 && REG_P (SET_DEST (tmp_pat)))
13246 int src_regno = REGNO (SET_SRC (tmp_pat));
13247 int dest_regno = REGNO (SET_DEST (tmp_pat));
13248 int gpr_regno;
13249 int fpr_regno;
13251 if (!((GENERAL_REGNO_P (src_regno)
13252 && FP_REGNO_P (dest_regno))
13253 || (FP_REGNO_P (src_regno)
13254 && GENERAL_REGNO_P (dest_regno))))
13255 continue;
13257 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13258 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13260 /* GPR must be call-saved, FPR must be call-clobbered. */
13261 if (!call_really_used_regs[fpr_regno]
13262 || call_really_used_regs[gpr_regno])
13263 continue;
13265 /* It must not happen that what we once saved in an FPR now
13266 needs a stack slot. */
13267 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13269 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13271 remove_insn (insn);
13272 continue;
13277 if (GET_CODE (pat) == PARALLEL
13278 && store_multiple_operation (pat, VOIDmode))
13280 set = XVECEXP (pat, 0, 0);
13281 first = REGNO (SET_SRC (set));
13282 last = first + XVECLEN (pat, 0) - 1;
13283 offset = const0_rtx;
13284 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13285 off = INTVAL (offset);
13287 if (GET_CODE (base) != REG || off < 0)
13288 continue;
13289 if (cfun_frame_layout.first_save_gpr != -1
13290 && (cfun_frame_layout.first_save_gpr < first
13291 || cfun_frame_layout.last_save_gpr > last))
13292 continue;
13293 if (REGNO (base) != STACK_POINTER_REGNUM
13294 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13295 continue;
13296 if (first > BASE_REGNUM || last < BASE_REGNUM)
13297 continue;
13299 if (cfun_frame_layout.first_save_gpr != -1)
13301 rtx s_pat = save_gprs (base,
13302 off + (cfun_frame_layout.first_save_gpr
13303 - first) * UNITS_PER_LONG,
13304 cfun_frame_layout.first_save_gpr,
13305 cfun_frame_layout.last_save_gpr);
13306 new_insn = emit_insn_before (s_pat, insn);
13307 INSN_ADDRESSES_NEW (new_insn, -1);
13310 remove_insn (insn);
13311 continue;
13314 if (cfun_frame_layout.first_save_gpr == -1
13315 && GET_CODE (pat) == SET
13316 && GENERAL_REG_P (SET_SRC (pat))
13317 && GET_CODE (SET_DEST (pat)) == MEM)
13319 set = pat;
13320 first = REGNO (SET_SRC (set));
13321 offset = const0_rtx;
13322 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13323 off = INTVAL (offset);
13325 if (GET_CODE (base) != REG || off < 0)
13326 continue;
13327 if (REGNO (base) != STACK_POINTER_REGNUM
13328 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13329 continue;
13331 remove_insn (insn);
13332 continue;
13335 if (GET_CODE (pat) == PARALLEL
13336 && load_multiple_operation (pat, VOIDmode))
13338 set = XVECEXP (pat, 0, 0);
13339 first = REGNO (SET_DEST (set));
13340 last = first + XVECLEN (pat, 0) - 1;
13341 offset = const0_rtx;
13342 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13343 off = INTVAL (offset);
13345 if (GET_CODE (base) != REG || off < 0)
13346 continue;
13348 if (cfun_frame_layout.first_restore_gpr != -1
13349 && (cfun_frame_layout.first_restore_gpr < first
13350 || cfun_frame_layout.last_restore_gpr > last))
13351 continue;
13352 if (REGNO (base) != STACK_POINTER_REGNUM
13353 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13354 continue;
13355 if (first > BASE_REGNUM || last < BASE_REGNUM)
13356 continue;
13358 if (cfun_frame_layout.first_restore_gpr != -1)
13360 rtx rpat = restore_gprs (base,
13361 off + (cfun_frame_layout.first_restore_gpr
13362 - first) * UNITS_PER_LONG,
13363 cfun_frame_layout.first_restore_gpr,
13364 cfun_frame_layout.last_restore_gpr);
13366 /* Remove REG_CFA_RESTOREs for registers that we no
13367 longer need to save. */
13368 REG_NOTES (rpat) = REG_NOTES (insn);
13369 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13370 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13371 && ((int) REGNO (XEXP (*ptr, 0))
13372 < cfun_frame_layout.first_restore_gpr))
13373 *ptr = XEXP (*ptr, 1);
13374 else
13375 ptr = &XEXP (*ptr, 1);
13376 new_insn = emit_insn_before (rpat, insn);
13377 RTX_FRAME_RELATED_P (new_insn) = 1;
13378 INSN_ADDRESSES_NEW (new_insn, -1);
13381 remove_insn (insn);
13382 continue;
13385 if (cfun_frame_layout.first_restore_gpr == -1
13386 && GET_CODE (pat) == SET
13387 && GENERAL_REG_P (SET_DEST (pat))
13388 && GET_CODE (SET_SRC (pat)) == MEM)
13390 set = pat;
13391 first = REGNO (SET_DEST (set));
13392 offset = const0_rtx;
13393 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13394 off = INTVAL (offset);
13396 if (GET_CODE (base) != REG || off < 0)
13397 continue;
13399 if (REGNO (base) != STACK_POINTER_REGNUM
13400 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13401 continue;
13403 remove_insn (insn);
13404 continue;
13409 /* On z10 and later the dynamic branch prediction must see the
13410 backward jump within a certain window. If not, it falls back to
13411 the static prediction. This function rearranges the loop backward
13412 branch in a way which makes the static prediction always correct.
13413 The function returns true if it added an instruction. */
13414 static bool
13415 s390_fix_long_loop_prediction (rtx_insn *insn)
13417 rtx set = single_set (insn);
13418 rtx code_label, label_ref;
13419 rtx_insn *uncond_jump;
13420 rtx_insn *cur_insn;
13421 rtx tmp;
13422 int distance;
13424 /* This will exclude branch on count and branch on index patterns
13425 since these are correctly statically predicted. */
13426 if (!set
13427 || SET_DEST (set) != pc_rtx
13428 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13429 return false;
13431 /* Skip conditional returns. */
13432 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13433 && XEXP (SET_SRC (set), 2) == pc_rtx)
13434 return false;
13436 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13437 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13439 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13441 code_label = XEXP (label_ref, 0);
13443 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13444 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13445 || (INSN_ADDRESSES (INSN_UID (insn))
13446 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13447 return false;
13449 for (distance = 0, cur_insn = PREV_INSN (insn);
13450 distance < PREDICT_DISTANCE - 6;
13451 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13452 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13453 return false;
13455 rtx_code_label *new_label = gen_label_rtx ();
13456 uncond_jump = emit_jump_insn_after (
13457 gen_rtx_SET (pc_rtx,
13458 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13459 insn);
13460 emit_label_after (new_label, uncond_jump);
13462 tmp = XEXP (SET_SRC (set), 1);
13463 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13464 XEXP (SET_SRC (set), 2) = tmp;
13465 INSN_CODE (insn) = -1;
13467 XEXP (label_ref, 0) = new_label;
13468 JUMP_LABEL (insn) = new_label;
13469 JUMP_LABEL (uncond_jump) = code_label;
13471 return true;
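/* Illustration only, a sketch of the rewrite performed above for a
   conditional backward branch whose target is more than PREDICT_DISTANCE
   away:

       before:                        after:
       loop:                          loop:
         ...                            ...
         brc  cond, loop  ; far away     brc  !cond, skip  ; inverted, short
                                         j    loop         ; uncond backward
                                       skip:

   The unconditional backward jump is always predicted correctly by the
   static predictor, so the loop no longer depends on the dynamic
   prediction window.  */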
13474 /* Returns 1 if INSN reads the value of REG for purposes not related
13475 to addressing of memory, and 0 otherwise. */
13476 static int
13477 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13479 return reg_referenced_p (reg, PATTERN (insn))
13480 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13483 /* Starting from INSN find_cond_jump looks downwards in the insn
13484 stream for a single jump insn which is the last user of the
13485 condition code set in INSN. */
13486 static rtx_insn *
13487 find_cond_jump (rtx_insn *insn)
13489 for (; insn; insn = NEXT_INSN (insn))
13491 rtx ite, cc;
13493 if (LABEL_P (insn))
13494 break;
13496 if (!JUMP_P (insn))
13498 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13499 break;
13500 continue;
13503 /* This will be triggered by a return. */
13504 if (GET_CODE (PATTERN (insn)) != SET)
13505 break;
13507 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13508 ite = SET_SRC (PATTERN (insn));
13510 if (GET_CODE (ite) != IF_THEN_ELSE)
13511 break;
13513 cc = XEXP (XEXP (ite, 0), 0);
13514 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13515 break;
13517 if (find_reg_note (insn, REG_DEAD, cc))
13518 return insn;
13519 break;
13522 return NULL;
13525 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13526 the semantics does not change. If NULL_RTX is passed as COND the
13527 function tries to find the conditional jump starting with INSN. */
13528 static void
13529 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13531 rtx tmp = *op0;
13533 if (cond == NULL_RTX)
13535 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13536 rtx set = jump ? single_set (jump) : NULL_RTX;
13538 if (set == NULL_RTX)
13539 return;
13541 cond = XEXP (SET_SRC (set), 0);
13544 *op0 = *op1;
13545 *op1 = tmp;
13546 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13549 /* On z10, instructions of the compare-and-branch family have the
13550 property to access the register occurring as second operand with
13551 its bits complemented. If such a compare is grouped with a second
13552 instruction that accesses the same register non-complemented, and
13553 if that register's value is delivered via a bypass, then the
13554 pipeline recycles, thereby causing significant performance decline.
13555 This function locates such situations and exchanges the two
13556 operands of the compare. The function return true whenever it
13557 added an insn. */
13558 static bool
13559 s390_z10_optimize_cmp (rtx_insn *insn)
13561 rtx_insn *prev_insn, *next_insn;
13562 bool insn_added_p = false;
13563 rtx cond, *op0, *op1;
13565 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13567 /* Handle compare and branch and branch on count
13568 instructions. */
13569 rtx pattern = single_set (insn);
13571 if (!pattern
13572 || SET_DEST (pattern) != pc_rtx
13573 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13574 return false;
13576 cond = XEXP (SET_SRC (pattern), 0);
13577 op0 = &XEXP (cond, 0);
13578 op1 = &XEXP (cond, 1);
13580 else if (GET_CODE (PATTERN (insn)) == SET)
13582 rtx src, dest;
13584 /* Handle normal compare instructions. */
13585 src = SET_SRC (PATTERN (insn));
13586 dest = SET_DEST (PATTERN (insn));
13588 if (!REG_P (dest)
13589 || !CC_REGNO_P (REGNO (dest))
13590 || GET_CODE (src) != COMPARE)
13591 return false;
13593 /* s390_swap_cmp will try to find the conditional
13594 jump when passing NULL_RTX as condition. */
13595 cond = NULL_RTX;
13596 op0 = &XEXP (src, 0);
13597 op1 = &XEXP (src, 1);
13599 else
13600 return false;
13602 if (!REG_P (*op0) || !REG_P (*op1))
13603 return false;
13605 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13606 return false;
13608 /* Swap the COMPARE arguments and its mask if there is a
13609 conflicting access in the previous insn. */
13610 prev_insn = prev_active_insn (insn);
13611 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13612 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13613 s390_swap_cmp (cond, op0, op1, insn);
13615 /* Check if there is a conflict with the next insn. If there
13616 was no conflict with the previous insn, then swap the
13617 COMPARE arguments and its mask. If we already swapped
13618 the operands, or if swapping them would cause a conflict
13619 with the previous insn, issue a NOP after the COMPARE in
13620 order to separate the two instructions. */
13621 next_insn = next_active_insn (insn);
13622 if (next_insn != NULL_RTX && INSN_P (next_insn)
13623 && s390_non_addr_reg_read_p (*op1, next_insn))
13625 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13626 && s390_non_addr_reg_read_p (*op0, prev_insn))
13628 if (REGNO (*op1) == 0)
13629 emit_insn_after (gen_nop1 (), insn);
13630 else
13631 emit_insn_after (gen_nop (), insn);
13632 insn_added_p = true;
13634 else
13635 s390_swap_cmp (cond, op0, op1, insn);
13637 return insn_added_p;
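/* Illustration only, a sketch of the situation handled above: a z10
   compare-and-branch such as

       crj  %r1,%r2,cond,label    ; second operand %r2 is read complemented

   is problematic when another instruction in the same group also reads
   %r2 (non-complemented) and %r2 arrives via a bypass.  The code above
   either swaps the compare operands (and the condition accordingly)

       crj  %r2,%r1,swapped-cond,label

   or, when swapping would just move the conflict to the other
   neighbour, emits a NOP after the compare to split the group.  */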
13640 /* Number of INSNs to be scanned backward in the last BB of the loop
13641 and forward in the first BB of the loop. This usually should be a
13642 bit more than the number of INSNs which could go into one
13643 group. */
13644 #define S390_OSC_SCAN_INSN_NUM 5
13646 /* Scan LOOP for static OSC collisions and return true if an osc_break
13647 should be issued for this loop. */
13648 static bool
13649 s390_adjust_loop_scan_osc (struct loop* loop)
13652 HARD_REG_SET modregs, newregs;
13653 rtx_insn *insn, *store_insn = NULL;
13654 rtx set;
13655 struct s390_address addr_store, addr_load;
13656 subrtx_iterator::array_type array;
13657 int insn_count;
13659 CLEAR_HARD_REG_SET (modregs);
13661 insn_count = 0;
13662 FOR_BB_INSNS_REVERSE (loop->latch, insn)
13664 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13665 continue;
13667 insn_count++;
13668 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13669 return false;
13671 find_all_hard_reg_sets (insn, &newregs, true);
13672 IOR_HARD_REG_SET (modregs, newregs);
13674 set = single_set (insn);
13675 if (!set)
13676 continue;
13678 if (MEM_P (SET_DEST (set))
13679 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
13681 store_insn = insn;
13682 break;
13686 if (store_insn == NULL_RTX)
13687 return false;
13689 insn_count = 0;
13690 FOR_BB_INSNS (loop->header, insn)
13692 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13693 continue;
13695 if (insn == store_insn)
13696 return false;
13698 insn_count++;
13699 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13700 return false;
13702 find_all_hard_reg_sets (insn, &newregs, true);
13703 IOR_HARD_REG_SET (modregs, newregs);
13705 set = single_set (insn);
13706 if (!set)
13707 continue;
13709 /* An intermediate store disrupts static OSC checking
13710 anyway. */
13711 if (MEM_P (SET_DEST (set))
13712 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
13713 return false;
13715 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
13716 if (MEM_P (*iter)
13717 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
13718 && rtx_equal_p (addr_load.base, addr_store.base)
13719 && rtx_equal_p (addr_load.indx, addr_store.indx)
13720 && rtx_equal_p (addr_load.disp, addr_store.disp))
13722 if ((addr_load.base != NULL_RTX
13723 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
13724 || (addr_load.indx != NULL_RTX
13725 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
13726 return true;
13729 return false;
13732 /* Look for adjustments which can be done on simple innermost
13733 loops. */
13734 static void
13735 s390_adjust_loops ()
13737 struct loop *loop = NULL;
13739 df_analyze ();
13740 compute_bb_for_insn ();
13742 /* Find the loops. */
13743 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
13745 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
13747 if (dump_file)
13749 flow_loop_dump (loop, dump_file, NULL, 0);
13750 fprintf (dump_file, ";; OSC loop scan Loop: ");
13752 if (loop->latch == NULL
13753 || pc_set (BB_END (loop->latch)) == NULL_RTX
13754 || !s390_adjust_loop_scan_osc (loop))
13756 if (dump_file)
13758 if (loop->latch == NULL)
13759 fprintf (dump_file, " multiple backward jumps\n");
13760 else
13762 fprintf (dump_file, " header insn: %d latch insn: %d ",
13763 INSN_UID (BB_HEAD (loop->header)),
13764 INSN_UID (BB_END (loop->latch)));
13765 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
13766 fprintf (dump_file, " loop does not end with jump\n");
13767 else
13768 fprintf (dump_file, " not instrumented\n");
13772 else
13774 rtx_insn *new_insn;
13776 if (dump_file)
13777 fprintf (dump_file, " adding OSC break insn: ");
13778 new_insn = emit_insn_before (gen_osc_break (),
13779 BB_END (loop->latch));
13780 INSN_ADDRESSES_NEW (new_insn, -1);
13784 loop_optimizer_finalize ();
13786 df_finish_pass (false);
13789 /* Perform machine-dependent processing. */
13791 static void
13792 s390_reorg (void)
13794 bool pool_overflow = false;
13795 int hw_before, hw_after;
13797 if (s390_tune == PROCESSOR_2964_Z13)
13798 s390_adjust_loops ();
13800 /* Make sure all splits have been performed; splits after
13801 machine_dependent_reorg might confuse insn length counts. */
13802 split_all_insns_noflow ();
13804 /* Install the main literal pool and the associated base
13805 register load insns.
13807 In addition, there are two problematic situations we need
13808 to correct:
13810 - the literal pool might be > 4096 bytes in size, so that
13811 some of its elements cannot be directly accessed
13813 - a branch target might be > 64K away from the branch, so that
13814 it is not possible to use a PC-relative instruction.
13816 To fix those, we split the single literal pool into multiple
13817 pool chunks, reloading the pool base register at various
13818 points throughout the function to ensure it always points to
13819 the pool chunk the following code expects, and / or replace
13820 PC-relative branches by absolute branches.
13822 However, the two problems are interdependent: splitting the
13823 literal pool can move a branch further away from its target,
13824 causing the 64K limit to overflow, and on the other hand,
13825 replacing a PC-relative branch by an absolute branch means
13826 we need to put the branch target address into the literal
13827 pool, possibly causing it to overflow.
13829 So, we loop trying to fix up both problems until we manage
13830 to satisfy both conditions at the same time. Note that the
13831 loop is guaranteed to terminate as every pass of the loop
13832 strictly decreases the total number of PC-relative branches
13833 in the function. (This is not completely true as there
13834 might be branch-over-pool insns introduced by chunkify_start.
13835 Those never need to be split however.) */
13837 for (;;)
13839 struct constant_pool *pool = NULL;
13841 /* Collect the literal pool. */
13842 if (!pool_overflow)
13844 pool = s390_mainpool_start ();
13845 if (!pool)
13846 pool_overflow = true;
13849 /* If literal pool overflowed, start to chunkify it. */
13850 if (pool_overflow)
13851 pool = s390_chunkify_start ();
13853 /* Split out-of-range branches. If this has created new
13854 literal pool entries, cancel current chunk list and
13855 recompute it. zSeries machines have large branch
13856 instructions, so we never need to split a branch. */
13857 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13859 if (pool_overflow)
13860 s390_chunkify_cancel (pool);
13861 else
13862 s390_mainpool_cancel (pool);
13864 continue;
13867 /* If we made it up to here, both conditions are satisfied.
13868 Finish up literal pool related changes. */
13869 if (pool_overflow)
13870 s390_chunkify_finish (pool);
13871 else
13872 s390_mainpool_finish (pool);
13874 /* We're done splitting branches. */
13875 cfun->machine->split_branches_pending_p = false;
13876 break;
13879 /* Generate out-of-pool execute target insns. */
13880 if (TARGET_CPU_ZARCH)
13882 rtx_insn *insn, *target;
13883 rtx label;
13885 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13887 label = s390_execute_label (insn);
13888 if (!label)
13889 continue;
13891 gcc_assert (label != const0_rtx);
13893 target = emit_label (XEXP (label, 0));
13894 INSN_ADDRESSES_NEW (target, -1);
13896 target = emit_insn (s390_execute_target (insn));
13897 INSN_ADDRESSES_NEW (target, -1);
13901 /* Try to optimize prologue and epilogue further. */
13902 s390_optimize_prologue ();
13904 /* Walk over the insns and do some >=z10 specific changes. */
13905 if (s390_tune >= PROCESSOR_2097_Z10)
13907 rtx_insn *insn;
13908 bool insn_added_p = false;
13910 /* The insn lengths and addresses have to be up to date for the
13911 following manipulations. */
13912 shorten_branches (get_insns ());
13914 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13916 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13917 continue;
13919 if (JUMP_P (insn))
13920 insn_added_p |= s390_fix_long_loop_prediction (insn);
13922 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13923 || GET_CODE (PATTERN (insn)) == SET)
13924 && s390_tune == PROCESSOR_2097_Z10)
13925 insn_added_p |= s390_z10_optimize_cmp (insn);
13928 /* Adjust branches if we added new instructions. */
13929 if (insn_added_p)
13930 shorten_branches (get_insns ());
13933 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13934 if (hw_after > 0)
13936 rtx_insn *insn;
13938 /* Insert NOPs for hotpatching. */
13939 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13940 /* Emit NOPs
13941 1. inside the area covered by debug information to allow setting
13942 breakpoints at the NOPs,
13943 2. before any insn which results in an asm instruction,
13944 3. before in-function labels to avoid jumping to the NOPs, for
13945 example as part of a loop,
13946 4. before any barrier in case the function is completely empty
13947 (__builtin_unreachable ()) and has neither internal labels nor
13948 active insns.  */
13950 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13951 break;
13952 /* Output a series of NOPs before the first active insn. */
13953 while (insn && hw_after > 0)
13955 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13957 emit_insn_before (gen_nop_6_byte (), insn);
13958 hw_after -= 3;
13960 else if (hw_after >= 2)
13962 emit_insn_before (gen_nop_4_byte (), insn);
13963 hw_after -= 2;
13965 else
13967 emit_insn_before (gen_nop_2_byte (), insn);
13968 hw_after -= 1;
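/* A worked example of the decomposition above: hw_after == 5 on a zarch
   target emits one 6-byte NOP (three halfwords) followed by one 4-byte
   NOP (two halfwords); without z/Architecture the same request is met
   with two 4-byte NOPs and one 2-byte NOP.  */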
13974 /* Return true if INSN is a fp load insn writing register REGNO. */
13975 static inline bool
13976 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13978 rtx set;
13979 enum attr_type flag = s390_safe_attr_type (insn);
13981 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13982 return false;
13984 set = single_set (insn);
13986 if (set == NULL_RTX)
13987 return false;
13989 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13990 return false;
13992 if (REGNO (SET_DEST (set)) != regno)
13993 return false;
13995 return true;
13998 /* This value describes the distance to be avoided between an
13999 arithmetic fp instruction and an fp load writing the same register.
14000 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
14001 fine, but the exact value has to be avoided. Otherwise the FP
14002 pipeline will throw an exception causing a major penalty. */
14003 #define Z10_EARLYLOAD_DISTANCE 7
14005 /* Rearrange the ready list in order to avoid the situation described
14006 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14007 moved to the very end of the ready list. */
14008 static void
14009 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14011 unsigned int regno;
14012 int nready = *nready_p;
14013 rtx_insn *tmp;
14014 int i;
14015 rtx_insn *insn;
14016 rtx set;
14017 enum attr_type flag;
14018 int distance;
14020 /* Skip DISTANCE - 1 active insns. */
14021 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14022 distance > 0 && insn != NULL_RTX;
14023 distance--, insn = prev_active_insn (insn))
14024 if (CALL_P (insn) || JUMP_P (insn))
14025 return;
14027 if (insn == NULL_RTX)
14028 return;
14030 set = single_set (insn);
14032 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14033 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14034 return;
14036 flag = s390_safe_attr_type (insn);
14038 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14039 return;
14041 regno = REGNO (SET_DEST (set));
14042 i = nready - 1;
14044 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14045 i--;
14047 if (!i)
14048 return;
14050 tmp = ready[i];
14051 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14052 ready[0] = tmp;
14056 /* The s390_sched_state variable tracks the state of the current or
14057 the last instruction group.
14059 0,1,2 number of instructions scheduled in the current group
14060 3 the last group is complete - normal insns
14061 4 the last group was a cracked/expanded insn */
14063 static int s390_sched_state;
14065 #define S390_SCHED_STATE_NORMAL 3
14066 #define S390_SCHED_STATE_CRACKED 4
14068 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14069 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14070 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14071 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
14073 static unsigned int
14074 s390_get_sched_attrmask (rtx_insn *insn)
14076 unsigned int mask = 0;
14078 switch (s390_tune)
14080 case PROCESSOR_2827_ZEC12:
14081 if (get_attr_zEC12_cracked (insn))
14082 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14083 if (get_attr_zEC12_expanded (insn))
14084 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14085 if (get_attr_zEC12_endgroup (insn))
14086 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14087 if (get_attr_zEC12_groupalone (insn))
14088 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14089 break;
14090 case PROCESSOR_2964_Z13:
14091 if (get_attr_z13_cracked (insn))
14092 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14093 if (get_attr_z13_expanded (insn))
14094 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14095 if (get_attr_z13_endgroup (insn))
14096 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14097 if (get_attr_z13_groupalone (insn))
14098 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14099 break;
14100 default:
14101 gcc_unreachable ();
14103 return mask;
14106 static unsigned int
14107 s390_get_unit_mask (rtx_insn *insn, int *units)
14109 unsigned int mask = 0;
14111 switch (s390_tune)
14113 case PROCESSOR_2964_Z13:
14114 *units = 3;
14115 if (get_attr_z13_unit_lsu (insn))
14116 mask |= 1 << 0;
14117 if (get_attr_z13_unit_fxu (insn))
14118 mask |= 1 << 1;
14119 if (get_attr_z13_unit_vfu (insn))
14120 mask |= 1 << 2;
14121 break;
14122 default:
14123 gcc_unreachable ();
14125 return mask;
14128 /* Return the scheduling score for INSN. The higher the score the
14129 better. The score is calculated from the OOO scheduling attributes
14130 of INSN and the scheduling state s390_sched_state. */
14131 static int
14132 s390_sched_score (rtx_insn *insn)
14134 unsigned int mask = s390_get_sched_attrmask (insn);
14135 int score = 0;
14137 switch (s390_sched_state)
14139 case 0:
14140 /* Try to put insns into the first slot which would otherwise
14141 break a group. */
14142 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14143 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14144 score += 5;
14145 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14146 score += 10;
14147 /* fallthrough */
14148 case 1:
14149 /* Prefer not cracked insns while trying to put together a
14150 group. */
14151 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14152 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14153 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14154 score += 10;
14155 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14156 score += 5;
14157 break;
14158 case 2:
14159 /* Prefer not cracked insns while trying to put together a
14160 group. */
14161 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14162 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14163 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14164 score += 10;
14165 /* Prefer endgroup insns in the last slot. */
14166 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14167 score += 10;
14168 break;
14169 case S390_SCHED_STATE_NORMAL:
14170 /* Prefer not cracked insns if the last was not cracked. */
14171 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14172 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14173 score += 5;
14174 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14175 score += 10;
14176 break;
14177 case S390_SCHED_STATE_CRACKED:
14178 /* Try to keep cracked insns together to prevent them from
14179 interrupting groups. */
14180 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14181 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14182 score += 5;
14183 break;
14186 if (s390_tune == PROCESSOR_2964_Z13)
14188 int units, i;
14189 unsigned unit_mask, m = 1;
14191 unit_mask = s390_get_unit_mask (insn, &units);
14192 gcc_assert (units <= MAX_SCHED_UNITS);
14194 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14195 ago the last insn of this unit type got scheduled. This is
14196 supposed to help providing a proper instruction mix to the
14197 CPU. */
14198 for (i = 0; i < units; i++, m <<= 1)
14199 if (m & unit_mask)
14200 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14201 MAX_SCHED_MIX_DISTANCE);
14203 return score;
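/* For illustration only (the actual MAX_SCHED_MIX_SCORE and
   MAX_SCHED_MIX_DISTANCE constants are defined earlier in this file;
   the values here are merely assumed): with MAX_SCHED_MIX_SCORE == 8
   and MAX_SCHED_MIX_DISTANCE == 100, an insn whose execution unit was
   last used 50 insns ago would gain 50 * 8 / 100 == 4 points.  */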
14206 /* This function is called via hook TARGET_SCHED_REORDER before
14207 issuing one insn from list READY which contains *NREADYP entries.
14208 For target z10 it reorders load instructions to avoid early load
14209 conflicts in the floating point pipeline.  */
14210 static int
14211 s390_sched_reorder (FILE *file, int verbose,
14212 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14214 if (s390_tune == PROCESSOR_2097_Z10
14215 && reload_completed
14216 && *nreadyp > 1)
14217 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14219 if (s390_tune >= PROCESSOR_2827_ZEC12
14220 && reload_completed
14221 && *nreadyp > 1)
14223 int i;
14224 int last_index = *nreadyp - 1;
14225 int max_index = -1;
14226 int max_score = -1;
14227 rtx_insn *tmp;
14229 /* Just move the insn with the highest score to the top (the
14230 end) of the list. A full sort is not needed since a conflict
14231 in the hazard recognition cannot happen. So the top insn in
14232 the ready list will always be taken. */
14233 for (i = last_index; i >= 0; i--)
14235 int score;
14237 if (recog_memoized (ready[i]) < 0)
14238 continue;
14240 score = s390_sched_score (ready[i]);
14241 if (score > max_score)
14243 max_score = score;
14244 max_index = i;
14248 if (max_index != -1)
14250 if (max_index != last_index)
14252 tmp = ready[max_index];
14253 ready[max_index] = ready[last_index];
14254 ready[last_index] = tmp;
14256 if (verbose > 5)
14257 fprintf (file,
14258 ";;\t\tBACKEND: move insn %d to the top of list\n",
14259 INSN_UID (ready[last_index]));
14261 else if (verbose > 5)
14262 fprintf (file,
14263 ";;\t\tBACKEND: best insn %d already on top\n",
14264 INSN_UID (ready[last_index]));
14267 if (verbose > 5)
14269 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14270 s390_sched_state);
14272 for (i = last_index; i >= 0; i--)
14274 unsigned int sched_mask;
14275 rtx_insn *insn = ready[i];
14277 if (recog_memoized (insn) < 0)
14278 continue;
14280 sched_mask = s390_get_sched_attrmask (insn);
14281 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14282 INSN_UID (insn),
14283 s390_sched_score (insn));
14284 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14285 ((M) & sched_mask) ? #ATTR : "");
14286 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14287 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14288 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14289 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14290 #undef PRINT_SCHED_ATTR
14291 if (s390_tune == PROCESSOR_2964_Z13)
14293 unsigned int unit_mask, m = 1;
14294 int units, j;
14296 unit_mask = s390_get_unit_mask (insn, &units);
14297 fprintf (file, "(units:");
14298 for (j = 0; j < units; j++, m <<= 1)
14299 if (m & unit_mask)
14300 fprintf (file, " u%d", j);
14301 fprintf (file, ")");
14303 fprintf (file, "\n");
14308 return s390_issue_rate ();
14312 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14313 the scheduler has issued INSN. It stores the last issued insn into
14314 last_scheduled_insn in order to make it available for
14315 s390_sched_reorder. */
14316 static int
14317 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14319 last_scheduled_insn = insn;
14321 if (s390_tune >= PROCESSOR_2827_ZEC12
14322 && reload_completed
14323 && recog_memoized (insn) >= 0)
14325 unsigned int mask = s390_get_sched_attrmask (insn);
14327 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14328 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14329 s390_sched_state = S390_SCHED_STATE_CRACKED;
14330 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14331 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14332 s390_sched_state = S390_SCHED_STATE_NORMAL;
14333 else
14335 /* Only normal insns are left (mask == 0). */
14336 switch (s390_sched_state)
14338 case 0:
14339 case 1:
14340 case 2:
14341 case S390_SCHED_STATE_NORMAL:
14342 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14343 s390_sched_state = 1;
14344 else
14345 s390_sched_state++;
14347 break;
14348 case S390_SCHED_STATE_CRACKED:
14349 s390_sched_state = S390_SCHED_STATE_NORMAL;
14350 break;
14354 if (s390_tune == PROCESSOR_2964_Z13)
14356 int units, i;
14357 unsigned unit_mask, m = 1;
14359 unit_mask = s390_get_unit_mask (insn, &units);
14360 gcc_assert (units <= MAX_SCHED_UNITS);
14362 for (i = 0; i < units; i++, m <<= 1)
14363 if (m & unit_mask)
14364 last_scheduled_unit_distance[i] = 0;
14365 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14366 last_scheduled_unit_distance[i]++;
14369 if (verbose > 5)
14371 unsigned int sched_mask;
14373 sched_mask = s390_get_sched_attrmask (insn);
14375 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14376 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14377 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14378 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14379 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14380 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14381 #undef PRINT_SCHED_ATTR
14383 if (s390_tune == PROCESSOR_2964_Z13)
14385 unsigned int unit_mask, m = 1;
14386 int units, j;
14388 unit_mask = s390_get_unit_mask (insn, &units);
14389 fprintf (file, "(units:");
14390 for (j = 0; j < units; j++, m <<= 1)
14391 if (m & unit_mask)
14392 fprintf (file, " %d", j);
14393 fprintf (file, ")");
14395 fprintf (file, " sched state: %d\n", s390_sched_state);
14397 if (s390_tune == PROCESSOR_2964_Z13)
14399 int units, j;
14401 s390_get_unit_mask (insn, &units);
14403 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14404 for (j = 0; j < units; j++)
14405 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14406 fprintf (file, "\n");
14411 if (GET_CODE (PATTERN (insn)) != USE
14412 && GET_CODE (PATTERN (insn)) != CLOBBER)
14413 return more - 1;
14414 else
14415 return more;
14418 static void
14419 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14420 int verbose ATTRIBUTE_UNUSED,
14421 int max_ready ATTRIBUTE_UNUSED)
14423 last_scheduled_insn = NULL;
14424 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14425 s390_sched_state = 0;
14428 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14429 how many times struct loop *LOOP should be unrolled when tuning for
14430 cpus with a built-in stride prefetcher.
14431 The loop body is analyzed for memory accesses by iterating over
14432 each rtx of the loop. Depending on the loop depth and the number of
14433 memory accesses a new number <= nunroll is returned to improve the
14434 behavior of the hardware prefetch unit. */
14435 static unsigned
14436 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14438 basic_block *bbs;
14439 rtx_insn *insn;
14440 unsigned i;
14441 unsigned mem_count = 0;
14443 if (s390_tune < PROCESSOR_2097_Z10)
14444 return nunroll;
14446 /* Count the number of memory references within the loop body. */
14447 bbs = get_loop_body (loop);
14448 subrtx_iterator::array_type array;
14449 for (i = 0; i < loop->num_nodes; i++)
14450 FOR_BB_INSNS (bbs[i], insn)
14451 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14452 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14453 if (MEM_P (*iter))
14454 mem_count += 1;
14455 free (bbs);
14457 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
14458 if (mem_count == 0)
14459 return nunroll;
14461 switch (loop_depth(loop))
14463 case 1:
14464 return MIN (nunroll, 28 / mem_count);
14465 case 2:
14466 return MIN (nunroll, 22 / mem_count);
14467 default:
14468 return MIN (nunroll, 16 / mem_count);
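/* A worked example of the adjustment above: a depth-1 loop containing 7
   memory references and a requested unroll factor of 8 is limited to
   MIN (8, 28 / 7) == 4 unrollings; the same loop nested one level deeper
   would be limited to MIN (8, 22 / 7) == 3.  */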
14472 /* Restore the current options. This is a hook function and also called
14473 internally. */
14475 static void
14476 s390_function_specific_restore (struct gcc_options *opts,
14477 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14479 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14482 static void
14483 s390_option_override_internal (bool main_args_p,
14484 struct gcc_options *opts,
14485 const struct gcc_options *opts_set)
14487 const char *prefix;
14488 const char *suffix;
14490 /* Set up prefix/suffix so the error messages refer to either the command
14491 line argument, or the attribute(target). */
14492 if (main_args_p)
14494 prefix = "-m";
14495 suffix = "";
14497 else
14499 prefix = "option(\"";
14500 suffix = "\")";
14504 /* Architecture mode defaults according to ABI. */
14505 if (!(opts_set->x_target_flags & MASK_ZARCH))
14507 if (TARGET_64BIT)
14508 opts->x_target_flags |= MASK_ZARCH;
14509 else
14510 opts->x_target_flags &= ~MASK_ZARCH;
14513 /* Set the march default in case it hasn't been specified on cmdline. */
14514 if (!opts_set->x_s390_arch)
14515 opts->x_s390_arch = PROCESSOR_2064_Z900;
14516 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14517 || opts->x_s390_arch == PROCESSOR_9672_G6)
14518 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14519 "in future releases; use at least %sarch=z900%s",
14520 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14521 suffix, prefix, suffix);
14523 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14525 /* Determine processor to tune for. */
14526 if (!opts_set->x_s390_tune)
14527 opts->x_s390_tune = opts->x_s390_arch;
14528 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14529 || opts->x_s390_tune == PROCESSOR_9672_G6)
14530 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14531 "in future releases; use at least %stune=z900%s",
14532 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14533 suffix, prefix, suffix);
14535 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14537 /* Sanity checks. */
14538 if (opts->x_s390_arch == PROCESSOR_NATIVE
14539 || opts->x_s390_tune == PROCESSOR_NATIVE)
14540 gcc_unreachable ();
14541 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14542 error ("z/Architecture mode not supported on %s",
14543 processor_table[(int)opts->x_s390_arch].name);
14544 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14545 error ("64-bit ABI not supported in ESA/390 mode");
14547 /* Enable hardware transactions if available and not explicitly
14548 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14549 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14551 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14552 opts->x_target_flags |= MASK_OPT_HTM;
14553 else
14554 opts->x_target_flags &= ~MASK_OPT_HTM;
14557 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14559 if (TARGET_OPT_VX_P (opts->x_target_flags))
14561 if (!TARGET_CPU_VX_P (opts))
14562 error ("hardware vector support not available on %s",
14563 processor_table[(int)opts->x_s390_arch].name);
14564 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14565 error ("hardware vector support not available with -msoft-float");
14568 else
14570 if (TARGET_CPU_VX_P (opts))
14571 /* Enable vector support if available and not explicitly disabled
14572 by user. E.g. with -m31 -march=z13 -mzarch */
14573 opts->x_target_flags |= MASK_OPT_VX;
14574 else
14575 opts->x_target_flags &= ~MASK_OPT_VX;
14578 /* Use hardware DFP if available and not explicitly disabled by
14579 user. E.g. with -m31 -march=z10 -mzarch */
14580 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14582 if (TARGET_DFP_P (opts))
14583 opts->x_target_flags |= MASK_HARD_DFP;
14584 else
14585 opts->x_target_flags &= ~MASK_HARD_DFP;
14588 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14590 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14592 if (!TARGET_CPU_DFP_P (opts))
14593 error ("hardware decimal floating point instructions"
14594 " not available on %s",
14595 processor_table[(int)opts->x_s390_arch].name);
14596 if (!TARGET_ZARCH_P (opts->x_target_flags))
14597 error ("hardware decimal floating point instructions"
14598 " not available in ESA/390 mode");
14600 else
14601 opts->x_target_flags &= ~MASK_HARD_DFP;
14604 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14605 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14607 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14608 && TARGET_HARD_DFP_P (opts->x_target_flags))
14609 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14611 opts->x_target_flags &= ~MASK_HARD_DFP;
14614 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14615 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14616 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14617 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14618 "in combination");
14620 if (opts->x_s390_stack_size)
14622 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14623 error ("stack size must be greater than the stack guard value");
14624 else if (opts->x_s390_stack_size > 1 << 16)
14625 error ("stack size must not be greater than 64k");
14627 else if (opts->x_s390_stack_guard)
14628 error ("-mstack-guard implies use of -mstack-size");
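/* Example: -mstack-size=32768 -mstack-guard=4096 passes the checks above,
   whereas -mstack-guard=4096 alone or -mstack-size=131072 (greater than
   64k) is rejected.  */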
14630 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14631 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14632 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14633 #endif
14635 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14637 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14638 opts->x_param_values,
14639 opts_set->x_param_values);
14640 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14641 opts->x_param_values,
14642 opts_set->x_param_values);
14643 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14644 opts->x_param_values,
14645 opts_set->x_param_values);
14646 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14647 opts->x_param_values,
14648 opts_set->x_param_values);
14651 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14652 opts->x_param_values,
14653 opts_set->x_param_values);
14654 /* values for loop prefetching */
14655 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14656 opts->x_param_values,
14657 opts_set->x_param_values);
14658 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14659 opts->x_param_values,
14660 opts_set->x_param_values);
14661 /* s390 has more than 2 cache levels and the sizes are much larger.
14662 Since we are always running virtualized, assume that we only get a
14663 small part of the caches above L1. */
14664 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14665 opts->x_param_values,
14666 opts_set->x_param_values);
14667 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14668 opts->x_param_values,
14669 opts_set->x_param_values);
14670 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14671 opts->x_param_values,
14672 opts_set->x_param_values);
14674 /* Use the alternative scheduling-pressure algorithm by default. */
14675 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14676 opts->x_param_values,
14677 opts_set->x_param_values);
14679 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
14680 opts->x_param_values,
14681 opts_set->x_param_values);
14683 /* Call target specific restore function to do post-init work. At the moment,
14684 this just sets opts->x_s390_cost_pointer. */
14685 s390_function_specific_restore (opts, NULL);
14688 static void
14689 s390_option_override (void)
14691 unsigned int i;
14692 cl_deferred_option *opt;
14693 vec<cl_deferred_option> *v =
14694 (vec<cl_deferred_option> *) s390_deferred_options;
14696 if (v)
14697 FOR_EACH_VEC_ELT (*v, i, opt)
14699 switch (opt->opt_index)
14701 case OPT_mhotpatch_:
14703 int val1;
14704 int val2;
14705 char s[256];
14706 char *t;
14708 strncpy (s, opt->arg, 256);
14709 s[255] = 0;
14710 t = strchr (s, ',');
14711 if (t != NULL)
14713 *t = 0;
14714 t++;
14715 val1 = integral_argument (s);
14716 val2 = integral_argument (t);
14718 else
14720 val1 = -1;
14721 val2 = -1;
14723 if (val1 == -1 || val2 == -1)
14725 /* argument is not a plain number */
14726 error ("arguments to %qs should be non-negative integers",
14727 "-mhotpatch=n,m");
14728 break;
14730 else if (val1 > s390_hotpatch_hw_max
14731 || val2 > s390_hotpatch_hw_max)
14733 error ("argument to %qs is too large (max. %d)",
14734 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14735 break;
14737 s390_hotpatch_hw_before_label = val1;
14738 s390_hotpatch_hw_after_label = val2;
14739 break;
14741 default:
14742 gcc_unreachable ();
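/* Example: -mhotpatch=1,2 sets s390_hotpatch_hw_before_label to 1 and
   s390_hotpatch_hw_after_label to 2, i.e. one halfword of NOPs is
   reserved before the function label and two halfwords of NOPs are
   emitted after it (see the hotpatch NOP emission in s390_reorg).  */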
14746 /* Set up function hooks. */
14747 init_machine_status = s390_init_machine_status;
14749 s390_option_override_internal (true, &global_options, &global_options_set);
14751 /* Save the initial options in case the user does function specific
14752 options. */
14753 target_option_default_node = build_target_option_node (&global_options);
14754 target_option_current_node = target_option_default_node;
14756 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14757 requires the arch flags to be evaluated already. Since prefetching
14758 is beneficial on s390, we enable it if available. */
14759 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14760 flag_prefetch_loop_arrays = 1;
14762 if (TARGET_TPF)
14764 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14765 debuggers do not yet support DWARF 3/4. */
14766 if (!global_options_set.x_dwarf_strict)
14767 dwarf_strict = 1;
14768 if (!global_options_set.x_dwarf_version)
14769 dwarf_version = 2;
14772 /* Register a target-specific optimization-and-lowering pass
14773 to run immediately before prologue and epilogue generation.
14775 Registering the pass must be done at start up. It's
14776 convenient to do it here. */
14777 opt_pass *new_pass = new pass_s390_early_mach (g);
14778 struct register_pass_info insert_pass_s390_early_mach =
14780 new_pass, /* pass */
14781 "pro_and_epilogue", /* reference_pass_name */
14782 1, /* ref_pass_instance_number */
14783 PASS_POS_INSERT_BEFORE /* po_op */
14785 register_pass (&insert_pass_s390_early_mach);
14788 #if S390_USE_TARGET_ATTRIBUTE
14789 /* Inner function to process the attribute((target(...))), take an argument and
14790 set the current options from the argument. If we have a list, recursively go
14791 over the list. */
14793 static bool
14794 s390_valid_target_attribute_inner_p (tree args,
14795 struct gcc_options *opts,
14796 struct gcc_options *new_opts_set,
14797 bool force_pragma)
14799 char *next_optstr;
14800 bool ret = true;
14802 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14803 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14804 static const struct
14806 const char *string;
14807 size_t len;
14808 int opt;
14809 int has_arg;
14810 int only_as_pragma;
14811 } attrs[] = {
14812 /* enum options */
14813 S390_ATTRIB ("arch=", OPT_march_, 1),
14814 S390_ATTRIB ("tune=", OPT_mtune_, 1),
14815 /* uinteger options */
14816 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
14817 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
14818 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
14819 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
14820 /* flag options */
14821 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
14822 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
14823 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
14824 S390_ATTRIB ("htm", OPT_mhtm, 0),
14825 S390_ATTRIB ("vx", OPT_mvx, 0),
14826 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
14827 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14828 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14829 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14830 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14831 /* boolean options */
14832 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14834 #undef S390_ATTRIB
14835 #undef S390_PRAGMA
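/* Illustrative use of the table above (a sketch, not taken from this
   file): a declaration such as

     __attribute__ ((target ("arch=z13,no-vx")))
     void foo (void);

   is handled below by resolving "arch=" to the OPT_march_ enum option
   and "no-vx" to the negated OPT_mvx flag option via the "no-" prefix
   handling.  */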
14837 /* If this is a list, recurse to get the options. */
14838 if (TREE_CODE (args) == TREE_LIST)
14840 bool ret = true;
14841 int num_pragma_values;
14842 int i;
14844 /* Note: attribs.c:decl_attributes prepends the values from
14845 current_target_pragma to the list of target attributes. To determine
14846 whether we're looking at a value of the attribute or the pragma we
14847 assume that the first [list_length (current_target_pragma)] values in
14848 the list are the values from the pragma. */
14849 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14850 ? list_length (current_target_pragma) : 0;
14851 for (i = 0; args; args = TREE_CHAIN (args), i++)
14853 bool is_pragma;
14855 is_pragma = (force_pragma || i < num_pragma_values);
14856 if (TREE_VALUE (args)
14857 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14858 opts, new_opts_set,
14859 is_pragma))
14861 ret = false;
14864 return ret;
14867 else if (TREE_CODE (args) != STRING_CST)
14869 error ("attribute %<target%> argument not a string");
14870 return false;
14873 /* Handle multiple arguments separated by commas. */
14874 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14876 while (next_optstr && *next_optstr != '\0')
14878 char *p = next_optstr;
14879 char *orig_p = p;
14880 char *comma = strchr (next_optstr, ',');
14881 size_t len, opt_len;
14882 int opt;
14883 bool opt_set_p;
14884 char ch;
14885 unsigned i;
14886 int mask = 0;
14887 enum cl_var_type var_type;
14888 bool found;
14890 if (comma)
14892 *comma = '\0';
14893 len = comma - next_optstr;
14894 next_optstr = comma + 1;
14896 else
14898 len = strlen (p);
14899 next_optstr = NULL;
14902 /* Recognize no-xxx. */
14903 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14905 opt_set_p = false;
14906 p += 3;
14907 len -= 3;
14909 else
14910 opt_set_p = true;
14912 /* Find the option. */
14913 ch = *p;
14914 found = false;
14915 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14917 opt_len = attrs[i].len;
14918 if (ch == attrs[i].string[0]
14919 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14920 && memcmp (p, attrs[i].string, opt_len) == 0)
14922 opt = attrs[i].opt;
14923 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14924 continue;
14925 mask = cl_options[opt].var_value;
14926 var_type = cl_options[opt].var_type;
14927 found = true;
14928 break;
14932 /* Process the option. */
14933 if (!found)
14935 error ("attribute(target(\"%s\")) is unknown", orig_p);
14936 return false;
14938 else if (attrs[i].only_as_pragma && !force_pragma)
14940 /* Value is not allowed for the target attribute. */
14941 error ("value %qs is not supported by attribute %<target%>",
14942 attrs[i].string);
14943 return false;
14946 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14948 if (var_type == CLVC_BIT_CLEAR)
14949 opt_set_p = !opt_set_p;
14951 if (opt_set_p)
14952 opts->x_target_flags |= mask;
14953 else
14954 opts->x_target_flags &= ~mask;
14955 new_opts_set->x_target_flags |= mask;
14958 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14960 int value;
14962 if (cl_options[opt].cl_uinteger)
14964 /* Unsigned integer argument. Code based on the function
14965 decode_cmdline_option () in opts-common.c. */
14966 value = integral_argument (p + opt_len);
14968 else
14969 value = (opt_set_p) ? 1 : 0;
14971 if (value != -1)
14973 struct cl_decoded_option decoded;
14975 /* Value range check; only implemented for numeric and boolean
14976 options at the moment. */
14977 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14978 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14979 set_option (opts, new_opts_set, opt, value,
14980 p + opt_len, DK_UNSPECIFIED, input_location,
14981 global_dc);
14983 else
14985 error ("attribute(target(\"%s\")) is unknown", orig_p);
14986 ret = false;
14990 else if (cl_options[opt].var_type == CLVC_ENUM)
14992 bool arg_ok;
14993 int value;
14995 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14996 if (arg_ok)
14997 set_option (opts, new_opts_set, opt, value,
14998 p + opt_len, DK_UNSPECIFIED, input_location,
14999 global_dc);
15000 else
15002 error ("attribute(target(\"%s\")) is unknown", orig_p);
15003 ret = false;
15007 else
15008 gcc_unreachable ();
15010 return ret;
15013 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15015 tree
15016 s390_valid_target_attribute_tree (tree args,
15017 struct gcc_options *opts,
15018 const struct gcc_options *opts_set,
15019 bool force_pragma)
15021 tree t = NULL_TREE;
15022 struct gcc_options new_opts_set;
15024 memset (&new_opts_set, 0, sizeof (new_opts_set));
15026 /* Process each of the options on the chain. */
15027 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15028 force_pragma))
15029 return error_mark_node;
15031 /* If some option was set (even if it has not changed), rerun
15032 s390_option_override_internal, and then save the options away. */
15033 if (new_opts_set.x_target_flags
15034 || new_opts_set.x_s390_arch
15035 || new_opts_set.x_s390_tune
15036 || new_opts_set.x_s390_stack_guard
15037 || new_opts_set.x_s390_stack_size
15038 || new_opts_set.x_s390_branch_cost
15039 || new_opts_set.x_s390_warn_framesize
15040 || new_opts_set.x_s390_warn_dynamicstack_p)
15042 const unsigned char *src = (const unsigned char *)opts_set;
15043 unsigned char *dest = (unsigned char *)&new_opts_set;
15044 unsigned int i;
15046 /* Merge the original option flags into the new ones. */
15047 for (i = 0; i < sizeof(*opts_set); i++)
15048 dest[i] |= src[i];
15050 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
15051 s390_option_override_internal (false, opts, &new_opts_set);
15052 /* Save the current options unless we are validating options for
15053 #pragma. */
15054 t = build_target_option_node (opts);
15056 return t;
15059 /* Hook to validate attribute((target("string"))). */
15061 static bool
15062 s390_valid_target_attribute_p (tree fndecl,
15063 tree ARG_UNUSED (name),
15064 tree args,
15065 int ARG_UNUSED (flags))
15067 struct gcc_options func_options;
15068 tree new_target, new_optimize;
15069 bool ret = true;
15071 /* attribute((target("default"))) does nothing, beyond
15072 affecting multi-versioning. */
15073 if (TREE_VALUE (args)
15074 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15075 && TREE_CHAIN (args) == NULL_TREE
15076 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15077 return true;
15079 tree old_optimize = build_optimization_node (&global_options);
15081 /* Get the optimization options of the current function. */
15082 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15084 if (!func_optimize)
15085 func_optimize = old_optimize;
15087 /* Init func_options. */
15088 memset (&func_options, 0, sizeof (func_options));
15089 init_options_struct (&func_options, NULL);
15090 lang_hooks.init_options_struct (&func_options);
15092 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15094 /* Initialize func_options to the default before its target options can
15095 be set. */
15096 cl_target_option_restore (&func_options,
15097 TREE_TARGET_OPTION (target_option_default_node));
15099 new_target = s390_valid_target_attribute_tree (args, &func_options,
15100 &global_options_set,
15101 (args ==
15102 current_target_pragma));
15103 new_optimize = build_optimization_node (&func_options);
15104 if (new_target == error_mark_node)
15105 ret = false;
15106 else if (fndecl && new_target)
15108 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15109 if (old_optimize != new_optimize)
15110 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
15112 return ret;
15115 /* Restore targets globals from NEW_TREE and invalidate s390_previous_fndecl
15116 cache. */
15118 void
15119 s390_activate_target_options (tree new_tree)
15121 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
15122 if (TREE_TARGET_GLOBALS (new_tree))
15123 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
15124 else if (new_tree == target_option_default_node)
15125 restore_target_globals (&default_target_globals);
15126 else
15127 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
15128 s390_previous_fndecl = NULL_TREE;
15131 /* Establish appropriate back-end context for processing the function
15132 FNDECL. The argument might be NULL to indicate processing at top
15133 level, outside of any function scope. */
15134 static void
15135 s390_set_current_function (tree fndecl)
15137 /* Only change the context if the function changes. This hook is called
15138 several times in the course of compiling a function, and we don't want to
15139 slow things down too much or call target_reinit when it isn't safe. */
15140 if (fndecl == s390_previous_fndecl)
15141 return;
15143 tree old_tree;
15144 if (s390_previous_fndecl == NULL_TREE)
15145 old_tree = target_option_current_node;
15146 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15147 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15148 else
15149 old_tree = target_option_default_node;
15151 if (fndecl == NULL_TREE)
15153 if (old_tree != target_option_current_node)
15154 s390_activate_target_options (target_option_current_node);
15155 return;
15158 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15159 if (new_tree == NULL_TREE)
15160 new_tree = target_option_default_node;
15162 if (old_tree != new_tree)
15163 s390_activate_target_options (new_tree);
15164 s390_previous_fndecl = fndecl;
15166 #endif
15168 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
15170 static bool
15171 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
15172 unsigned int align ATTRIBUTE_UNUSED,
15173 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15174 bool speed_p ATTRIBUTE_UNUSED)
15176 return (size == 1 || size == 2
15177 || size == 4 || (TARGET_ZARCH && size == 8));
15180 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
15182 static void
15183 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15185 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15186 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
15187 tree call_efpc = build_call_expr (efpc, 0);
15188 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
15190 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15191 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15192 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15193 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15194 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15195 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15197 /* Generates the equivalent of feholdexcept (&fenv_var)
15199 fenv_var = __builtin_s390_efpc ();
15200 __builtin_s390_sfpc (fenv_var & mask) */
15201 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15202 tree new_fpc =
15203 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15204 build_int_cst (unsigned_type_node,
15205 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15206 FPC_EXCEPTION_MASK)));
15207 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15208 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15210 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15212 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15213 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15214 build_int_cst (unsigned_type_node,
15215 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15216 *clear = build_call_expr (sfpc, 1, new_fpc);
15218 /* Generates the equivalent of feupdateenv (fenv_var)
15220 old_fpc = __builtin_s390_efpc ();
15221 __builtin_s390_sfpc (fenv_var);
15222 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15224 old_fpc = create_tmp_var_raw (unsigned_type_node);
15225 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15226 old_fpc, call_efpc);
15228 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15230 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15231 build_int_cst (unsigned_type_node,
15232 FPC_FLAGS_MASK));
15233 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15234 build_int_cst (unsigned_type_node,
15235 FPC_FLAGS_SHIFT));
15236 tree atomic_feraiseexcept
15237 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15238 raise_old_except = build_call_expr (atomic_feraiseexcept,
15239 1, raise_old_except);
15241 *update = build2 (COMPOUND_EXPR, void_type_node,
15242 build2 (COMPOUND_EXPR, void_type_node,
15243 store_old_fpc, set_new_fpc),
15244 raise_old_except);
15246 #undef FPC_EXCEPTION_MASK
15247 #undef FPC_FLAGS_MASK
15248 #undef FPC_DXC_MASK
15249 #undef FPC_EXCEPTION_MASK_SHIFT
15250 #undef FPC_FLAGS_SHIFT
15251 #undef FPC_DXC_SHIFT
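/* Sketch of where the expansions above are used (illustrative example,
   not taken from this file): a C11 atomic compound assignment on a
   floating-point object, e.g.

     _Atomic double d;
     void add (double x) { d += x; }

   expands to a compare-and-exchange loop; the *hold, *clear and *update
   sequences keep FP exception flags raised by failed iterations from
   leaking into the floating-point environment.  */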
15254 /* Return the vector mode to be used for inner mode MODE when doing
15255 vectorization. */
15256 static machine_mode
15257 s390_preferred_simd_mode (machine_mode mode)
15259 if (TARGET_VX)
15260 switch (mode)
15262 case DFmode:
15263 return V2DFmode;
15264 case DImode:
15265 return V2DImode;
15266 case SImode:
15267 return V4SImode;
15268 case HImode:
15269 return V8HImode;
15270 case QImode:
15271 return V16QImode;
15272 default:;
15274 return word_mode;
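/* Example (illustrative): with TARGET_VX a loop over double elements,

     void scale (double *a, double f, int n)
     { for (int i = 0; i < n; i++) a[i] *= f; }

   is vectorized using the V2DFmode preference returned above; without
   vector support word_mode is returned and no vector mode is preferred.  */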
15277 /* Our hardware does not require vectors to be strictly aligned. */
15278 static bool
15279 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15280 const_tree type ATTRIBUTE_UNUSED,
15281 int misalignment ATTRIBUTE_UNUSED,
15282 bool is_packed ATTRIBUTE_UNUSED)
15284 if (TARGET_VX)
15285 return true;
15287 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15288 is_packed);
15291 /* The vector ABI requires vector types to be aligned on an 8 byte
15292 boundary (our stack alignment). However, we allow the user to
15293 override this, even though doing so definitely breaks the ABI. */
15294 static HOST_WIDE_INT
15295 s390_vector_alignment (const_tree type)
15297 if (!TARGET_VX_ABI)
15298 return default_vector_alignment (type);
15300 if (TYPE_USER_ALIGN (type))
15301 return TYPE_ALIGN (type);
15303 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
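/* For example, a 16-byte vector type (TYPE_SIZE == 128 bits) gets
   MIN (64, 128) == 64-bit (8-byte) alignment under the vector ABI,
   while an explicit __attribute__ ((aligned (16))) on the type is
   honored via the TYPE_USER_ALIGN check above.  */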
15306 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15307 /* Implement TARGET_ASM_FILE_START. */
15308 static void
15309 s390_asm_file_start (void)
15311 default_file_start ();
15312 s390_asm_output_machine_for_arch (asm_out_file);
15314 #endif
15316 /* Implement TARGET_ASM_FILE_END. */
15317 static void
15318 s390_asm_file_end (void)
15320 #ifdef HAVE_AS_GNU_ATTRIBUTE
15321 varpool_node *vnode;
15322 cgraph_node *cnode;
15324 FOR_EACH_VARIABLE (vnode)
15325 if (TREE_PUBLIC (vnode->decl))
15326 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15328 FOR_EACH_FUNCTION (cnode)
15329 if (TREE_PUBLIC (cnode->decl))
15330 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15333 if (s390_vector_abi != 0)
15334 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15335 s390_vector_abi);
15336 #endif
15337 file_end_indicate_exec_stack ();
15339 if (flag_split_stack)
15340 file_end_indicate_split_stack ();
15343 /* Return true if TYPE is a vector bool type. */
15344 static inline bool
15345 s390_vector_bool_type_p (const_tree type)
15347 return TYPE_VECTOR_OPAQUE (type);
15350 /* Return the diagnostic message string if the binary operation OP is
15351 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15352 static const char*
15353 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15355 bool bool1_p, bool2_p;
15356 bool plusminus_p;
15357 bool muldiv_p;
15358 bool compare_p;
15359 machine_mode mode1, mode2;
15361 if (!TARGET_ZVECTOR)
15362 return NULL;
15364 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15365 return NULL;
15367 bool1_p = s390_vector_bool_type_p (type1);
15368 bool2_p = s390_vector_bool_type_p (type2);
15370 /* Mixing signed and unsigned types is forbidden for all
15371 operators. */
15372 if (!bool1_p && !bool2_p
15373 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15374 return N_("types differ in signedness");
15376 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15377 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15378 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15379 || op == ROUND_DIV_EXPR);
15380 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15381 || op == EQ_EXPR || op == NE_EXPR);
15383 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15384 return N_("binary operator does not support two vector bool operands");
15386 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15387 return N_("binary operator does not support vector bool operand");
15389 mode1 = TYPE_MODE (type1);
15390 mode2 = TYPE_MODE (type2);
15392 if (bool1_p != bool2_p && plusminus_p
15393 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15394 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15395 return N_("binary operator does not support mixing vector "
15396 "bool with floating point vector operands");
15398 return NULL;
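/* Examples of operations rejected above (user level, with the zvector
   language extension enabled): adding two "vector bool int" operands
   triggers "binary operator does not support two vector bool operands",
   and multiplying a "vector bool long long" by a "vector double"
   triggers "binary operator does not support vector bool operand".  */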
15401 /* Implement TARGET_C_EXCESS_PRECISION.
15403 FIXME: For historical reasons, float_t and double_t are typedef'ed to
15404 double on s390, causing operations on float_t to operate in a higher
15405 precision than is necessary. However, it is not the case that SFmode
15406 operations have implicit excess precision, and we generate more optimal
15407 code if we let the compiler know no implicit extra precision is added.
15409 That means when we are compiling with -fexcess-precision=fast, the value
15410 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
15411 float_t (though they would be correct for -fexcess-precision=standard).
15413 A complete fix would modify glibc to remove the unnecessary typedef
15414 of float_t to double. */
15416 static enum flt_eval_method
15417 s390_excess_precision (enum excess_precision_type type)
15419 switch (type)
15421 case EXCESS_PRECISION_TYPE_IMPLICIT:
15422 case EXCESS_PRECISION_TYPE_FAST:
15423 /* The fastest type to promote to will always be the native type,
15424 whether that occurs with implicit excess precision or
15425 otherwise. */
15426 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
15427 case EXCESS_PRECISION_TYPE_STANDARD:
15428 /* Otherwise, when we are in a standards compliant mode, to
15429 ensure consistency with the implementation in glibc, report that
15430 float is evaluated to the range and precision of double. */
15431 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
15432 default:
15433 gcc_unreachable ();
15435 return FLT_EVAL_METHOD_UNPREDICTABLE;
15438 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
15440 static unsigned HOST_WIDE_INT
15441 s390_asan_shadow_offset (void)
15443 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
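/* Illustrative (assuming the usual ASan mapping shadow = (addr >> 3)
   + offset): on 64-bit targets the shadow byte for address A is located
   at (A >> 3) + (1UL << 52); on 31-bit targets the offset is
   0x20000000.  */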
15446 /* Initialize GCC target structure. */
15448 #undef TARGET_ASM_ALIGNED_HI_OP
15449 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15450 #undef TARGET_ASM_ALIGNED_DI_OP
15451 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15452 #undef TARGET_ASM_INTEGER
15453 #define TARGET_ASM_INTEGER s390_assemble_integer
15455 #undef TARGET_ASM_OPEN_PAREN
15456 #define TARGET_ASM_OPEN_PAREN ""
15458 #undef TARGET_ASM_CLOSE_PAREN
15459 #define TARGET_ASM_CLOSE_PAREN ""
15461 #undef TARGET_OPTION_OVERRIDE
15462 #define TARGET_OPTION_OVERRIDE s390_option_override
15464 #ifdef TARGET_THREAD_SSP_OFFSET
15465 #undef TARGET_STACK_PROTECT_GUARD
15466 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
15467 #endif
15469 #undef TARGET_ENCODE_SECTION_INFO
15470 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15472 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15473 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15475 #ifdef HAVE_AS_TLS
15476 #undef TARGET_HAVE_TLS
15477 #define TARGET_HAVE_TLS true
15478 #endif
15479 #undef TARGET_CANNOT_FORCE_CONST_MEM
15480 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
15482 #undef TARGET_DELEGITIMIZE_ADDRESS
15483 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
15485 #undef TARGET_LEGITIMIZE_ADDRESS
15486 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
15488 #undef TARGET_RETURN_IN_MEMORY
15489 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
15491 #undef TARGET_INIT_BUILTINS
15492 #define TARGET_INIT_BUILTINS s390_init_builtins
15493 #undef TARGET_EXPAND_BUILTIN
15494 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
15495 #undef TARGET_BUILTIN_DECL
15496 #define TARGET_BUILTIN_DECL s390_builtin_decl
15498 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
15499 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
15501 #undef TARGET_ASM_OUTPUT_MI_THUNK
15502 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
15503 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
15504 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
15506 #undef TARGET_C_EXCESS_PRECISION
15507 #define TARGET_C_EXCESS_PRECISION s390_excess_precision
15509 #undef TARGET_SCHED_ADJUST_PRIORITY
15510 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
15511 #undef TARGET_SCHED_ISSUE_RATE
15512 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
15513 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
15514 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
15516 #undef TARGET_SCHED_VARIABLE_ISSUE
15517 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
15518 #undef TARGET_SCHED_REORDER
15519 #define TARGET_SCHED_REORDER s390_sched_reorder
15520 #undef TARGET_SCHED_INIT
15521 #define TARGET_SCHED_INIT s390_sched_init
15523 #undef TARGET_CANNOT_COPY_INSN_P
15524 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
15525 #undef TARGET_RTX_COSTS
15526 #define TARGET_RTX_COSTS s390_rtx_costs
15527 #undef TARGET_ADDRESS_COST
15528 #define TARGET_ADDRESS_COST s390_address_cost
15529 #undef TARGET_REGISTER_MOVE_COST
15530 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
15531 #undef TARGET_MEMORY_MOVE_COST
15532 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
15533 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
15534 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
15535 s390_builtin_vectorization_cost
15537 #undef TARGET_MACHINE_DEPENDENT_REORG
15538 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
15540 #undef TARGET_VALID_POINTER_MODE
15541 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
15543 #undef TARGET_BUILD_BUILTIN_VA_LIST
15544 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
15545 #undef TARGET_EXPAND_BUILTIN_VA_START
15546 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
15547 #undef TARGET_ASAN_SHADOW_OFFSET
15548 #define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
15549 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
15550 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
15552 #undef TARGET_PROMOTE_FUNCTION_MODE
15553 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
15554 #undef TARGET_PASS_BY_REFERENCE
15555 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
15557 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
15558 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
15559 #undef TARGET_FUNCTION_ARG
15560 #define TARGET_FUNCTION_ARG s390_function_arg
15561 #undef TARGET_FUNCTION_ARG_ADVANCE
15562 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
15563 #undef TARGET_FUNCTION_VALUE
15564 #define TARGET_FUNCTION_VALUE s390_function_value
15565 #undef TARGET_LIBCALL_VALUE
15566 #define TARGET_LIBCALL_VALUE s390_libcall_value
15567 #undef TARGET_STRICT_ARGUMENT_NAMING
15568 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
15570 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
15571 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
15573 #undef TARGET_FIXED_CONDITION_CODE_REGS
15574 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
15576 #undef TARGET_CC_MODES_COMPATIBLE
15577 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
15579 #undef TARGET_INVALID_WITHIN_DOLOOP
15580 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
15582 #ifdef HAVE_AS_TLS
15583 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
15584 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
15585 #endif
15587 #undef TARGET_DWARF_FRAME_REG_MODE
15588 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
15590 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
15591 #undef TARGET_MANGLE_TYPE
15592 #define TARGET_MANGLE_TYPE s390_mangle_type
15593 #endif
15595 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15596 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15598 #undef TARGET_VECTOR_MODE_SUPPORTED_P
15599 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
15601 #undef TARGET_PREFERRED_RELOAD_CLASS
15602 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
15604 #undef TARGET_SECONDARY_RELOAD
15605 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
15607 #undef TARGET_LIBGCC_CMP_RETURN_MODE
15608 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
15610 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
15611 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
15613 #undef TARGET_LEGITIMATE_ADDRESS_P
15614 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
15616 #undef TARGET_LEGITIMATE_CONSTANT_P
15617 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
15619 #undef TARGET_LRA_P
15620 #define TARGET_LRA_P s390_lra_p
15622 #undef TARGET_CAN_ELIMINATE
15623 #define TARGET_CAN_ELIMINATE s390_can_eliminate
15625 #undef TARGET_CONDITIONAL_REGISTER_USAGE
15626 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
15628 #undef TARGET_LOOP_UNROLL_ADJUST
15629 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
15631 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
15632 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
15633 #undef TARGET_TRAMPOLINE_INIT
15634 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
15636 /* PR 79421 */
15637 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
15638 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
15640 #undef TARGET_UNWIND_WORD_MODE
15641 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
15643 #undef TARGET_CANONICALIZE_COMPARISON
15644 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
15646 #undef TARGET_HARD_REGNO_SCRATCH_OK
15647 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
15649 #undef TARGET_ATTRIBUTE_TABLE
15650 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
15652 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
15653 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
15655 #undef TARGET_SET_UP_BY_PROLOGUE
15656 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
15658 #undef TARGET_EXTRA_LIVE_ON_ENTRY
15659 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
15661 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
15662 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
15663 s390_use_by_pieces_infrastructure_p
15665 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
15666 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
15668 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
15669 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
15671 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
15672 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
15674 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
15675 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
15677 #undef TARGET_VECTOR_ALIGNMENT
15678 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
15680 #undef TARGET_INVALID_BINARY_OP
15681 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
15683 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15684 #undef TARGET_ASM_FILE_START
15685 #define TARGET_ASM_FILE_START s390_asm_file_start
15686 #endif
15688 #undef TARGET_ASM_FILE_END
15689 #define TARGET_ASM_FILE_END s390_asm_file_end
15691 #if S390_USE_TARGET_ATTRIBUTE
15692 #undef TARGET_SET_CURRENT_FUNCTION
15693 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
15695 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
15696 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
15697 #endif
15699 #undef TARGET_OPTION_RESTORE
15700 #define TARGET_OPTION_RESTORE s390_function_specific_restore
15702 struct gcc_target targetm = TARGET_INITIALIZER;
15704 #include "gt-s390.h"