S/390: Add missing fallthrough comments.
[official-gcc.git] / gcc / config / s390 / s390.c
blob3bdb64871af9ed2756360e48543171261fd97496
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2016 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "target-globals.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "cfgloop.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "expmed.h"
38 #include "optabs.h"
39 #include "regs.h"
40 #include "emit-rtl.h"
41 #include "recog.h"
42 #include "cgraph.h"
43 #include "diagnostic-core.h"
44 #include "diagnostic.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "print-tree.h"
48 #include "stor-layout.h"
49 #include "varasm.h"
50 #include "calls.h"
51 #include "conditions.h"
52 #include "output.h"
53 #include "insn-attr.h"
54 #include "flags.h"
55 #include "except.h"
56 #include "dojump.h"
57 #include "explow.h"
58 #include "stmt.h"
59 #include "expr.h"
60 #include "reload.h"
61 #include "cfgrtl.h"
62 #include "cfganal.h"
63 #include "lcm.h"
64 #include "cfgbuild.h"
65 #include "cfgcleanup.h"
66 #include "debug.h"
67 #include "langhooks.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
70 #include "tree-eh.h"
71 #include "gimplify.h"
72 #include "params.h"
73 #include "opts.h"
74 #include "tree-pass.h"
75 #include "context.h"
76 #include "builtins.h"
77 #include "rtl-iter.h"
78 #include "intl.h"
79 #include "tm-constrs.h"
81 /* This file should be included last. */
82 #include "target-def.h"
84 /* Remember the last target of s390_set_current_function. */
85 static GTY(()) tree s390_previous_fndecl;
87 /* Define the specific costs for a given cpu. */
89 struct processor_costs
91 /* multiplication */
92 const int m; /* cost of an M instruction. */
93 const int mghi; /* cost of an MGHI instruction. */
94 const int mh; /* cost of an MH instruction. */
95 const int mhi; /* cost of an MHI instruction. */
96 const int ml; /* cost of an ML instruction. */
97 const int mr; /* cost of an MR instruction. */
98 const int ms; /* cost of an MS instruction. */
99 const int msg; /* cost of an MSG instruction. */
100 const int msgf; /* cost of an MSGF instruction. */
101 const int msgfr; /* cost of an MSGFR instruction. */
102 const int msgr; /* cost of an MSGR instruction. */
103 const int msr; /* cost of an MSR instruction. */
104 const int mult_df; /* cost of multiplication in DFmode. */
105 const int mxbr;
106 /* square root */
107 const int sqxbr; /* cost of square root in TFmode. */
108 const int sqdbr; /* cost of square root in DFmode. */
109 const int sqebr; /* cost of square root in SFmode. */
110 /* multiply and add */
111 const int madbr; /* cost of multiply and add in DFmode. */
112 const int maebr; /* cost of multiply and add in SFmode. */
113 /* division */
114 const int dxbr;
115 const int ddbr;
116 const int debr;
117 const int dlgr;
118 const int dlr;
119 const int dr;
120 const int dsgfr;
121 const int dsgr;
124 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
126 static const
127 struct processor_costs z900_cost =
129 COSTS_N_INSNS (5), /* M */
130 COSTS_N_INSNS (10), /* MGHI */
131 COSTS_N_INSNS (5), /* MH */
132 COSTS_N_INSNS (4), /* MHI */
133 COSTS_N_INSNS (5), /* ML */
134 COSTS_N_INSNS (5), /* MR */
135 COSTS_N_INSNS (4), /* MS */
136 COSTS_N_INSNS (15), /* MSG */
137 COSTS_N_INSNS (7), /* MSGF */
138 COSTS_N_INSNS (7), /* MSGFR */
139 COSTS_N_INSNS (10), /* MSGR */
140 COSTS_N_INSNS (4), /* MSR */
141 COSTS_N_INSNS (7), /* multiplication in DFmode */
142 COSTS_N_INSNS (13), /* MXBR */
143 COSTS_N_INSNS (136), /* SQXBR */
144 COSTS_N_INSNS (44), /* SQDBR */
145 COSTS_N_INSNS (35), /* SQEBR */
146 COSTS_N_INSNS (18), /* MADBR */
147 COSTS_N_INSNS (13), /* MAEBR */
148 COSTS_N_INSNS (134), /* DXBR */
149 COSTS_N_INSNS (30), /* DDBR */
150 COSTS_N_INSNS (27), /* DEBR */
151 COSTS_N_INSNS (220), /* DLGR */
152 COSTS_N_INSNS (34), /* DLR */
153 COSTS_N_INSNS (34), /* DR */
154 COSTS_N_INSNS (32), /* DSGFR */
155 COSTS_N_INSNS (32), /* DSGR */
158 static const
159 struct processor_costs z990_cost =
161 COSTS_N_INSNS (4), /* M */
162 COSTS_N_INSNS (2), /* MGHI */
163 COSTS_N_INSNS (2), /* MH */
164 COSTS_N_INSNS (2), /* MHI */
165 COSTS_N_INSNS (4), /* ML */
166 COSTS_N_INSNS (4), /* MR */
167 COSTS_N_INSNS (5), /* MS */
168 COSTS_N_INSNS (6), /* MSG */
169 COSTS_N_INSNS (4), /* MSGF */
170 COSTS_N_INSNS (4), /* MSGFR */
171 COSTS_N_INSNS (4), /* MSGR */
172 COSTS_N_INSNS (4), /* MSR */
173 COSTS_N_INSNS (1), /* multiplication in DFmode */
174 COSTS_N_INSNS (28), /* MXBR */
175 COSTS_N_INSNS (130), /* SQXBR */
176 COSTS_N_INSNS (66), /* SQDBR */
177 COSTS_N_INSNS (38), /* SQEBR */
178 COSTS_N_INSNS (1), /* MADBR */
179 COSTS_N_INSNS (1), /* MAEBR */
180 COSTS_N_INSNS (60), /* DXBR */
181 COSTS_N_INSNS (40), /* DDBR */
182 COSTS_N_INSNS (26), /* DEBR */
183 COSTS_N_INSNS (176), /* DLGR */
184 COSTS_N_INSNS (31), /* DLR */
185 COSTS_N_INSNS (31), /* DR */
186 COSTS_N_INSNS (31), /* DSGFR */
187 COSTS_N_INSNS (31), /* DSGR */
190 static const
191 struct processor_costs z9_109_cost =
193 COSTS_N_INSNS (4), /* M */
194 COSTS_N_INSNS (2), /* MGHI */
195 COSTS_N_INSNS (2), /* MH */
196 COSTS_N_INSNS (2), /* MHI */
197 COSTS_N_INSNS (4), /* ML */
198 COSTS_N_INSNS (4), /* MR */
199 COSTS_N_INSNS (5), /* MS */
200 COSTS_N_INSNS (6), /* MSG */
201 COSTS_N_INSNS (4), /* MSGF */
202 COSTS_N_INSNS (4), /* MSGFR */
203 COSTS_N_INSNS (4), /* MSGR */
204 COSTS_N_INSNS (4), /* MSR */
205 COSTS_N_INSNS (1), /* multiplication in DFmode */
206 COSTS_N_INSNS (28), /* MXBR */
207 COSTS_N_INSNS (130), /* SQXBR */
208 COSTS_N_INSNS (66), /* SQDBR */
209 COSTS_N_INSNS (38), /* SQEBR */
210 COSTS_N_INSNS (1), /* MADBR */
211 COSTS_N_INSNS (1), /* MAEBR */
212 COSTS_N_INSNS (60), /* DXBR */
213 COSTS_N_INSNS (40), /* DDBR */
214 COSTS_N_INSNS (26), /* DEBR */
215 COSTS_N_INSNS (30), /* DLGR */
216 COSTS_N_INSNS (23), /* DLR */
217 COSTS_N_INSNS (23), /* DR */
218 COSTS_N_INSNS (24), /* DSGFR */
219 COSTS_N_INSNS (24), /* DSGR */
222 static const
223 struct processor_costs z10_cost =
225 COSTS_N_INSNS (10), /* M */
226 COSTS_N_INSNS (10), /* MGHI */
227 COSTS_N_INSNS (10), /* MH */
228 COSTS_N_INSNS (10), /* MHI */
229 COSTS_N_INSNS (10), /* ML */
230 COSTS_N_INSNS (10), /* MR */
231 COSTS_N_INSNS (10), /* MS */
232 COSTS_N_INSNS (10), /* MSG */
233 COSTS_N_INSNS (10), /* MSGF */
234 COSTS_N_INSNS (10), /* MSGFR */
235 COSTS_N_INSNS (10), /* MSGR */
236 COSTS_N_INSNS (10), /* MSR */
237 COSTS_N_INSNS (1) , /* multiplication in DFmode */
238 COSTS_N_INSNS (50), /* MXBR */
239 COSTS_N_INSNS (120), /* SQXBR */
240 COSTS_N_INSNS (52), /* SQDBR */
241 COSTS_N_INSNS (38), /* SQEBR */
242 COSTS_N_INSNS (1), /* MADBR */
243 COSTS_N_INSNS (1), /* MAEBR */
244 COSTS_N_INSNS (111), /* DXBR */
245 COSTS_N_INSNS (39), /* DDBR */
246 COSTS_N_INSNS (32), /* DEBR */
247 COSTS_N_INSNS (160), /* DLGR */
248 COSTS_N_INSNS (71), /* DLR */
249 COSTS_N_INSNS (71), /* DR */
250 COSTS_N_INSNS (71), /* DSGFR */
251 COSTS_N_INSNS (71), /* DSGR */
254 static const
255 struct processor_costs z196_cost =
257 COSTS_N_INSNS (7), /* M */
258 COSTS_N_INSNS (5), /* MGHI */
259 COSTS_N_INSNS (5), /* MH */
260 COSTS_N_INSNS (5), /* MHI */
261 COSTS_N_INSNS (7), /* ML */
262 COSTS_N_INSNS (7), /* MR */
263 COSTS_N_INSNS (6), /* MS */
264 COSTS_N_INSNS (8), /* MSG */
265 COSTS_N_INSNS (6), /* MSGF */
266 COSTS_N_INSNS (6), /* MSGFR */
267 COSTS_N_INSNS (8), /* MSGR */
268 COSTS_N_INSNS (6), /* MSR */
269 COSTS_N_INSNS (1) , /* multiplication in DFmode */
270 COSTS_N_INSNS (40), /* MXBR B+40 */
271 COSTS_N_INSNS (100), /* SQXBR B+100 */
272 COSTS_N_INSNS (42), /* SQDBR B+42 */
273 COSTS_N_INSNS (28), /* SQEBR B+28 */
274 COSTS_N_INSNS (1), /* MADBR B */
275 COSTS_N_INSNS (1), /* MAEBR B */
276 COSTS_N_INSNS (101), /* DXBR B+101 */
277 COSTS_N_INSNS (29), /* DDBR */
278 COSTS_N_INSNS (22), /* DEBR */
279 COSTS_N_INSNS (160), /* DLGR cracked */
280 COSTS_N_INSNS (160), /* DLR cracked */
281 COSTS_N_INSNS (160), /* DR expanded */
282 COSTS_N_INSNS (160), /* DSGFR cracked */
283 COSTS_N_INSNS (160), /* DSGR cracked */
286 static const
287 struct processor_costs zEC12_cost =
289 COSTS_N_INSNS (7), /* M */
290 COSTS_N_INSNS (5), /* MGHI */
291 COSTS_N_INSNS (5), /* MH */
292 COSTS_N_INSNS (5), /* MHI */
293 COSTS_N_INSNS (7), /* ML */
294 COSTS_N_INSNS (7), /* MR */
295 COSTS_N_INSNS (6), /* MS */
296 COSTS_N_INSNS (8), /* MSG */
297 COSTS_N_INSNS (6), /* MSGF */
298 COSTS_N_INSNS (6), /* MSGFR */
299 COSTS_N_INSNS (8), /* MSGR */
300 COSTS_N_INSNS (6), /* MSR */
301 COSTS_N_INSNS (1) , /* multiplication in DFmode */
302 COSTS_N_INSNS (40), /* MXBR B+40 */
303 COSTS_N_INSNS (100), /* SQXBR B+100 */
304 COSTS_N_INSNS (42), /* SQDBR B+42 */
305 COSTS_N_INSNS (28), /* SQEBR B+28 */
306 COSTS_N_INSNS (1), /* MADBR B */
307 COSTS_N_INSNS (1), /* MAEBR B */
308 COSTS_N_INSNS (131), /* DXBR B+131 */
309 COSTS_N_INSNS (29), /* DDBR */
310 COSTS_N_INSNS (22), /* DEBR */
311 COSTS_N_INSNS (160), /* DLGR cracked */
312 COSTS_N_INSNS (160), /* DLR cracked */
313 COSTS_N_INSNS (160), /* DR expanded */
314 COSTS_N_INSNS (160), /* DSGFR cracked */
315 COSTS_N_INSNS (160), /* DSGR cracked */
318 static struct
320 const char *const name;
321 const enum processor_type processor;
322 const struct processor_costs *cost;
324 const processor_table[] =
326 { "g5", PROCESSOR_9672_G5, &z900_cost },
327 { "g6", PROCESSOR_9672_G6, &z900_cost },
328 { "z900", PROCESSOR_2064_Z900, &z900_cost },
329 { "z990", PROCESSOR_2084_Z990, &z990_cost },
330 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
331 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
332 { "z10", PROCESSOR_2097_Z10, &z10_cost },
333 { "z196", PROCESSOR_2817_Z196, &z196_cost },
334 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
335 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
336 { "native", PROCESSOR_NATIVE, NULL }
339 extern int reload_completed;
341 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
342 static rtx_insn *last_scheduled_insn;
343 #define MAX_SCHED_UNITS 3
344 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
346 /* The maximum score added for an instruction whose unit hasn't been
347 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
348 give instruction mix scheduling more priority over instruction
349 grouping. */
350 #define MAX_SCHED_MIX_SCORE 8
352 /* The maximum distance up to which individual scores will be
353 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
354 Increase this with the OOO windows size of the machine. */
355 #define MAX_SCHED_MIX_DISTANCE 100
357 /* Structure used to hold the components of a S/390 memory
358 address. A legitimate address on S/390 is of the general
359 form
360 base + index + displacement
361 where any of the components is optional.
363 base and index are registers of the class ADDR_REGS,
364 displacement is an unsigned 12-bit immediate constant. */
366 struct s390_address
368 rtx base;
369 rtx indx;
370 rtx disp;
371 bool pointer;
372 bool literal_pool;
375 /* The following structure is embedded in the machine
376 specific part of struct function. */
378 struct GTY (()) s390_frame_layout
380 /* Offset within stack frame. */
381 HOST_WIDE_INT gprs_offset;
382 HOST_WIDE_INT f0_offset;
383 HOST_WIDE_INT f4_offset;
384 HOST_WIDE_INT f8_offset;
385 HOST_WIDE_INT backchain_offset;
387 /* Number of first and last gpr where slots in the register
388 save area are reserved for. */
389 int first_save_gpr_slot;
390 int last_save_gpr_slot;
392 /* Location (FP register number) where GPRs (r0-r15) should
393 be saved to.
394 0 - does not need to be saved at all
395 -1 - stack slot */
396 #define SAVE_SLOT_NONE 0
397 #define SAVE_SLOT_STACK -1
398 signed char gpr_save_slots[16];
400 /* Number of first and last gpr to be saved, restored. */
401 int first_save_gpr;
402 int first_restore_gpr;
403 int last_save_gpr;
404 int last_restore_gpr;
406 /* Bits standing for floating point registers. Set, if the
407 respective register has to be saved. Starting with reg 16 (f0)
408 at the rightmost bit.
409 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
410 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
411 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
412 unsigned int fpr_bitmap;
414 /* Number of floating point registers f8-f15 which must be saved. */
415 int high_fprs;
417 /* Set if return address needs to be saved.
418 This flag is set by s390_return_addr_rtx if it could not use
419 the initial value of r14 and therefore depends on r14 saved
420 to the stack. */
421 bool save_return_addr_p;
423 /* Size of stack frame. */
424 HOST_WIDE_INT frame_size;
427 /* Define the structure for the machine field in struct function. */
429 struct GTY(()) machine_function
431 struct s390_frame_layout frame_layout;
433 /* Literal pool base register. */
434 rtx base_reg;
436 /* True if we may need to perform branch splitting. */
437 bool split_branches_pending_p;
439 bool has_landing_pad_p;
441 /* True if the current function may contain a tbegin clobbering
442 FPRs. */
443 bool tbegin_p;
445 /* For -fsplit-stack support: A stack local which holds a pointer to
446 the stack arguments for a function with a variable number of
447 arguments. This is set at the start of the function and is used
448 to initialize the overflow_arg_area field of the va_list
449 structure. */
450 rtx split_stack_varargs_pointer;
453 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
455 #define cfun_frame_layout (cfun->machine->frame_layout)
456 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
457 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
458 ? cfun_frame_layout.fpr_bitmap & 0x0f \
459 : cfun_frame_layout.fpr_bitmap & 0x03))
460 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
461 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
462 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
463 (1 << (REGNO - FPR0_REGNUM)))
464 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
465 (1 << (REGNO - FPR0_REGNUM))))
466 #define cfun_gpr_save_slot(REGNO) \
467 cfun->machine->frame_layout.gpr_save_slots[REGNO]
469 /* Number of GPRs and FPRs used for argument passing. */
470 #define GP_ARG_NUM_REG 5
471 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
472 #define VEC_ARG_NUM_REG 8
474 /* A couple of shortcuts. */
475 #define CONST_OK_FOR_J(x) \
476 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
477 #define CONST_OK_FOR_K(x) \
478 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
479 #define CONST_OK_FOR_Os(x) \
480 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
481 #define CONST_OK_FOR_Op(x) \
482 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
483 #define CONST_OK_FOR_On(x) \
484 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
486 #define REGNO_PAIR_OK(REGNO, MODE) \
487 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
489 /* That's the read ahead of the dynamic branch prediction unit in
490 bytes on a z10 (or higher) CPU. */
491 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
494 /* Indicate which ABI has been used for passing vector args.
495 0 - no vector type arguments have been passed where the ABI is relevant
496 1 - the old ABI has been used
497 2 - a vector type argument has been passed either in a vector register
498 or on the stack by value */
499 static int s390_vector_abi = 0;
501 /* Set the vector ABI marker if TYPE is subject to the vector ABI
502 switch. The vector ABI affects only vector data types. There are
503 two aspects of the vector ABI relevant here:
505 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
506 ABI and natural alignment with the old.
508 2. vector <= 16 bytes are passed in VRs or by value on the stack
509 with the new ABI but by reference on the stack with the old.
511 If ARG_P is true TYPE is used for a function argument or return
512 value. The ABI marker then is set for all vector data types. If
513 ARG_P is false only type 1 vectors are being checked. */
515 static void
516 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
518 static hash_set<const_tree> visited_types_hash;
520 if (s390_vector_abi)
521 return;
523 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
524 return;
526 if (visited_types_hash.contains (type))
527 return;
529 visited_types_hash.add (type);
531 if (VECTOR_TYPE_P (type))
533 int type_size = int_size_in_bytes (type);
535 /* Outside arguments only the alignment is changing and this
536 only happens for vector types >= 16 bytes. */
537 if (!arg_p && type_size < 16)
538 return;
540 /* In arguments vector types > 16 are passed as before (GCC
541 never enforced the bigger alignment for arguments which was
542 required by the old vector ABI). However, it might still be
543 ABI relevant due to the changed alignment if it is a struct
544 member. */
545 if (arg_p && type_size > 16 && !in_struct_p)
546 return;
548 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
550 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
552 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
553 natural alignment there will never be ABI dependent padding
554 in an array type. That's why we do not set in_struct_p to
555 true here. */
556 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
558 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
560 tree arg_chain;
562 /* Check the return type. */
563 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
565 for (arg_chain = TYPE_ARG_TYPES (type);
566 arg_chain;
567 arg_chain = TREE_CHAIN (arg_chain))
568 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
570 else if (RECORD_OR_UNION_TYPE_P (type))
572 tree field;
574 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
576 if (TREE_CODE (field) != FIELD_DECL)
577 continue;
579 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
585 /* System z builtins. */
587 #include "s390-builtins.h"
589 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
591 #undef B_DEF
592 #undef OB_DEF
593 #undef OB_DEF_VAR
594 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
595 #define OB_DEF(...)
596 #define OB_DEF_VAR(...)
597 #include "s390-builtins.def"
601 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
603 #undef B_DEF
604 #undef OB_DEF
605 #undef OB_DEF_VAR
606 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
607 #define OB_DEF(...)
608 #define OB_DEF_VAR(...)
609 #include "s390-builtins.def"
613 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
615 #undef B_DEF
616 #undef OB_DEF
617 #undef OB_DEF_VAR
618 #define B_DEF(...)
619 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
620 #define OB_DEF_VAR(...)
621 #include "s390-builtins.def"
625 const unsigned int
626 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
628 #undef B_DEF
629 #undef OB_DEF
630 #undef OB_DEF_VAR
631 #define B_DEF(...)
632 #define OB_DEF(...)
633 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
634 #include "s390-builtins.def"
638 tree s390_builtin_types[BT_MAX];
639 tree s390_builtin_fn_types[BT_FN_MAX];
640 tree s390_builtin_decls[S390_BUILTIN_MAX +
641 S390_OVERLOADED_BUILTIN_MAX +
642 S390_OVERLOADED_BUILTIN_VAR_MAX];
644 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
645 #undef B_DEF
646 #undef OB_DEF
647 #undef OB_DEF_VAR
648 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
649 #define OB_DEF(...)
650 #define OB_DEF_VAR(...)
652 #include "s390-builtins.def"
653 CODE_FOR_nothing
656 static void
657 s390_init_builtins (void)
659 /* These definitions are being used in s390-builtins.def. */
660 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
661 NULL, NULL);
662 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
663 tree c_uint64_type_node;
665 /* The uint64_type_node from tree.c is not compatible to the C99
666 uint64_t data type. What we want is c_uint64_type_node from
667 c-common.c. But since backend code is not supposed to interface
668 with the frontend we recreate it here. */
669 if (TARGET_64BIT)
670 c_uint64_type_node = long_unsigned_type_node;
671 else
672 c_uint64_type_node = long_long_unsigned_type_node;
674 #undef DEF_TYPE
675 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
676 if (s390_builtin_types[INDEX] == NULL) \
677 s390_builtin_types[INDEX] = (!CONST_P) ? \
678 (NODE) : build_type_variant ((NODE), 1, 0);
680 #undef DEF_POINTER_TYPE
681 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
682 if (s390_builtin_types[INDEX] == NULL) \
683 s390_builtin_types[INDEX] = \
684 build_pointer_type (s390_builtin_types[INDEX_BASE]);
686 #undef DEF_DISTINCT_TYPE
687 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
688 if (s390_builtin_types[INDEX] == NULL) \
689 s390_builtin_types[INDEX] = \
690 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
692 #undef DEF_VECTOR_TYPE
693 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
694 if (s390_builtin_types[INDEX] == NULL) \
695 s390_builtin_types[INDEX] = \
696 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
698 #undef DEF_OPAQUE_VECTOR_TYPE
699 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
700 if (s390_builtin_types[INDEX] == NULL) \
701 s390_builtin_types[INDEX] = \
702 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
704 #undef DEF_FN_TYPE
705 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
706 if (s390_builtin_fn_types[INDEX] == NULL) \
707 s390_builtin_fn_types[INDEX] = \
708 build_function_type_list (args, NULL_TREE);
709 #undef DEF_OV_TYPE
710 #define DEF_OV_TYPE(...)
711 #include "s390-builtin-types.def"
713 #undef B_DEF
714 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
715 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
716 s390_builtin_decls[S390_BUILTIN_##NAME] = \
717 add_builtin_function ("__builtin_" #NAME, \
718 s390_builtin_fn_types[FNTYPE], \
719 S390_BUILTIN_##NAME, \
720 BUILT_IN_MD, \
721 NULL, \
722 ATTRS);
723 #undef OB_DEF
724 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
725 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
726 == NULL) \
727 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
728 add_builtin_function ("__builtin_" #NAME, \
729 s390_builtin_fn_types[FNTYPE], \
730 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
731 BUILT_IN_MD, \
732 NULL, \
734 #undef OB_DEF_VAR
735 #define OB_DEF_VAR(...)
736 #include "s390-builtins.def"
740 /* Return true if ARG is appropriate as argument number ARGNUM of
741 builtin DECL. The operand flags from s390-builtins.def have to
742 passed as OP_FLAGS. */
743 bool
744 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
746 if (O_UIMM_P (op_flags))
748 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
749 int bitwidth = bitwidths[op_flags - O_U1];
751 if (!tree_fits_uhwi_p (arg)
752 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
754 error("constant argument %d for builtin %qF is out of range (0.."
755 HOST_WIDE_INT_PRINT_UNSIGNED ")",
756 argnum, decl,
757 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
758 return false;
762 if (O_SIMM_P (op_flags))
764 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
765 int bitwidth = bitwidths[op_flags - O_S2];
767 if (!tree_fits_shwi_p (arg)
768 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
769 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
771 error("constant argument %d for builtin %qF is out of range ("
772 HOST_WIDE_INT_PRINT_DEC ".."
773 HOST_WIDE_INT_PRINT_DEC ")",
774 argnum, decl,
775 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
776 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
777 return false;
780 return true;
783 /* Expand an expression EXP that calls a built-in function,
784 with result going to TARGET if that's convenient
785 (and in mode MODE if that's convenient).
786 SUBTARGET may be used as the target for computing one of EXP's operands.
787 IGNORE is nonzero if the value is to be ignored. */
789 static rtx
790 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
791 machine_mode mode ATTRIBUTE_UNUSED,
792 int ignore ATTRIBUTE_UNUSED)
794 #define MAX_ARGS 6
796 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
797 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
798 enum insn_code icode;
799 rtx op[MAX_ARGS], pat;
800 int arity;
801 bool nonvoid;
802 tree arg;
803 call_expr_arg_iterator iter;
804 unsigned int all_op_flags = opflags_for_builtin (fcode);
805 machine_mode last_vec_mode = VOIDmode;
807 if (TARGET_DEBUG_ARG)
809 fprintf (stderr,
810 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
811 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
812 bflags_for_builtin (fcode));
815 if (S390_USE_TARGET_ATTRIBUTE)
817 unsigned int bflags;
819 bflags = bflags_for_builtin (fcode);
820 if ((bflags & B_HTM) && !TARGET_HTM)
822 error ("Builtin %qF is not supported without -mhtm "
823 "(default with -march=zEC12 and higher).", fndecl);
824 return const0_rtx;
826 if ((bflags & B_VX) && !TARGET_VX)
828 error ("Builtin %qF is not supported without -mvx "
829 "(default with -march=z13 and higher).", fndecl);
830 return const0_rtx;
833 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
834 && fcode < S390_ALL_BUILTIN_MAX)
836 gcc_unreachable ();
838 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
840 icode = code_for_builtin[fcode];
841 /* Set a flag in the machine specific cfun part in order to support
842 saving/restoring of FPRs. */
843 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
844 cfun->machine->tbegin_p = true;
846 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
848 error ("Unresolved overloaded builtin");
849 return const0_rtx;
851 else
852 internal_error ("bad builtin fcode");
854 if (icode == 0)
855 internal_error ("bad builtin icode");
857 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
859 if (nonvoid)
861 machine_mode tmode = insn_data[icode].operand[0].mode;
862 if (!target
863 || GET_MODE (target) != tmode
864 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
865 target = gen_reg_rtx (tmode);
867 /* There are builtins (e.g. vec_promote) with no vector
868 arguments but an element selector. So we have to also look
869 at the vector return type when emitting the modulo
870 operation. */
871 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
872 last_vec_mode = insn_data[icode].operand[0].mode;
875 arity = 0;
876 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
878 rtx tmp_rtx;
879 const struct insn_operand_data *insn_op;
880 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
882 all_op_flags = all_op_flags >> O_SHIFT;
884 if (arg == error_mark_node)
885 return NULL_RTX;
886 if (arity >= MAX_ARGS)
887 return NULL_RTX;
889 if (O_IMM_P (op_flags)
890 && TREE_CODE (arg) != INTEGER_CST)
892 error ("constant value required for builtin %qF argument %d",
893 fndecl, arity + 1);
894 return const0_rtx;
897 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
898 return const0_rtx;
900 insn_op = &insn_data[icode].operand[arity + nonvoid];
901 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
903 /* expand_expr truncates constants to the target mode only if it
904 is "convenient". However, our checks below rely on this
905 being done. */
906 if (CONST_INT_P (op[arity])
907 && SCALAR_INT_MODE_P (insn_op->mode)
908 && GET_MODE (op[arity]) != insn_op->mode)
909 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
910 insn_op->mode));
912 /* Wrap the expanded RTX for pointer types into a MEM expr with
913 the proper mode. This allows us to use e.g. (match_operand
914 "memory_operand"..) in the insn patterns instead of (mem
915 (match_operand "address_operand)). This is helpful for
916 patterns not just accepting MEMs. */
917 if (POINTER_TYPE_P (TREE_TYPE (arg))
918 && insn_op->predicate != address_operand)
919 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
921 /* Expand the module operation required on element selectors. */
922 if (op_flags == O_ELEM)
924 gcc_assert (last_vec_mode != VOIDmode);
925 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
926 op[arity],
927 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
928 NULL_RTX, 1, OPTAB_DIRECT);
931 /* Record the vector mode used for an element selector. This assumes:
932 1. There is no builtin with two different vector modes and an element selector
933 2. The element selector comes after the vector type it is referring to.
934 This currently the true for all the builtins but FIXME we
935 should better check for that. */
936 if (VECTOR_MODE_P (insn_op->mode))
937 last_vec_mode = insn_op->mode;
939 if (insn_op->predicate (op[arity], insn_op->mode))
941 arity++;
942 continue;
945 if (MEM_P (op[arity])
946 && insn_op->predicate == memory_operand
947 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
948 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
950 op[arity] = replace_equiv_address (op[arity],
951 copy_to_mode_reg (Pmode,
952 XEXP (op[arity], 0)));
954 /* Some of the builtins require different modes/types than the
955 pattern in order to implement a specific API. Instead of
956 adding many expanders which do the mode change we do it here.
957 E.g. s390_vec_add_u128 required to have vector unsigned char
958 arguments is mapped to addti3. */
959 else if (insn_op->mode != VOIDmode
960 && GET_MODE (op[arity]) != VOIDmode
961 && GET_MODE (op[arity]) != insn_op->mode
962 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
963 GET_MODE (op[arity]), 0))
964 != NULL_RTX))
966 op[arity] = tmp_rtx;
968 else if (GET_MODE (op[arity]) == insn_op->mode
969 || GET_MODE (op[arity]) == VOIDmode
970 || (insn_op->predicate == address_operand
971 && GET_MODE (op[arity]) == Pmode))
973 /* An address_operand usually has VOIDmode in the expander
974 so we cannot use this. */
975 machine_mode target_mode =
976 (insn_op->predicate == address_operand
977 ? Pmode : insn_op->mode);
978 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
981 if (!insn_op->predicate (op[arity], insn_op->mode))
983 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
984 return const0_rtx;
986 arity++;
989 switch (arity)
991 case 0:
992 pat = GEN_FCN (icode) (target);
993 break;
994 case 1:
995 if (nonvoid)
996 pat = GEN_FCN (icode) (target, op[0]);
997 else
998 pat = GEN_FCN (icode) (op[0]);
999 break;
1000 case 2:
1001 if (nonvoid)
1002 pat = GEN_FCN (icode) (target, op[0], op[1]);
1003 else
1004 pat = GEN_FCN (icode) (op[0], op[1]);
1005 break;
1006 case 3:
1007 if (nonvoid)
1008 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1009 else
1010 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1011 break;
1012 case 4:
1013 if (nonvoid)
1014 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1015 else
1016 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1017 break;
1018 case 5:
1019 if (nonvoid)
1020 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1021 else
1022 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1023 break;
1024 case 6:
1025 if (nonvoid)
1026 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1027 else
1028 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1029 break;
1030 default:
1031 gcc_unreachable ();
1033 if (!pat)
1034 return NULL_RTX;
1035 emit_insn (pat);
1037 if (nonvoid)
1038 return target;
1039 else
1040 return const0_rtx;
1044 static const int s390_hotpatch_hw_max = 1000000;
1045 static int s390_hotpatch_hw_before_label = 0;
1046 static int s390_hotpatch_hw_after_label = 0;
1048 /* Check whether the hotpatch attribute is applied to a function and, if it has
1049 an argument, the argument is valid. */
1051 static tree
1052 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1053 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1055 tree expr;
1056 tree expr2;
1057 int err;
1059 if (TREE_CODE (*node) != FUNCTION_DECL)
1061 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1062 name);
1063 *no_add_attrs = true;
1065 if (args != NULL && TREE_CHAIN (args) != NULL)
1067 expr = TREE_VALUE (args);
1068 expr2 = TREE_VALUE (TREE_CHAIN (args));
1070 if (args == NULL || TREE_CHAIN (args) == NULL)
1071 err = 1;
1072 else if (TREE_CODE (expr) != INTEGER_CST
1073 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1074 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1075 err = 1;
1076 else if (TREE_CODE (expr2) != INTEGER_CST
1077 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1078 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1079 err = 1;
1080 else
1081 err = 0;
1082 if (err)
1084 error ("requested %qE attribute is not a comma separated pair of"
1085 " non-negative integer constants or too large (max. %d)", name,
1086 s390_hotpatch_hw_max);
1087 *no_add_attrs = true;
1090 return NULL_TREE;
1093 /* Expand the s390_vector_bool type attribute. */
1095 static tree
1096 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1097 tree args ATTRIBUTE_UNUSED,
1098 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1100 tree type = *node, result = NULL_TREE;
1101 machine_mode mode;
1103 while (POINTER_TYPE_P (type)
1104 || TREE_CODE (type) == FUNCTION_TYPE
1105 || TREE_CODE (type) == METHOD_TYPE
1106 || TREE_CODE (type) == ARRAY_TYPE)
1107 type = TREE_TYPE (type);
1109 mode = TYPE_MODE (type);
1110 switch (mode)
1112 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1113 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1114 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1115 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1116 default: break;
1119 *no_add_attrs = true; /* No need to hang on to the attribute. */
1121 if (result)
1122 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1124 return NULL_TREE;
1127 static const struct attribute_spec s390_attribute_table[] = {
1128 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1129 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1130 /* End element. */
1131 { NULL, 0, 0, false, false, false, NULL, false }
1134 /* Return the alignment for LABEL. We default to the -falign-labels
1135 value except for the literal pool base label. */
1137 s390_label_align (rtx_insn *label)
1139 rtx_insn *prev_insn = prev_active_insn (label);
1140 rtx set, src;
1142 if (prev_insn == NULL_RTX)
1143 goto old;
1145 set = single_set (prev_insn);
1147 if (set == NULL_RTX)
1148 goto old;
1150 src = SET_SRC (set);
1152 /* Don't align literal pool base labels. */
1153 if (GET_CODE (src) == UNSPEC
1154 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1155 return 0;
1157 old:
1158 return align_labels_log;
1161 static machine_mode
1162 s390_libgcc_cmp_return_mode (void)
1164 return TARGET_64BIT ? DImode : SImode;
1167 static machine_mode
1168 s390_libgcc_shift_count_mode (void)
1170 return TARGET_64BIT ? DImode : SImode;
1173 static machine_mode
1174 s390_unwind_word_mode (void)
1176 return TARGET_64BIT ? DImode : SImode;
1179 /* Return true if the back end supports mode MODE. */
1180 static bool
1181 s390_scalar_mode_supported_p (machine_mode mode)
1183 /* In contrast to the default implementation reject TImode constants on 31bit
1184 TARGET_ZARCH for ABI compliance. */
1185 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1186 return false;
1188 if (DECIMAL_FLOAT_MODE_P (mode))
1189 return default_decimal_float_supported_p ();
1191 return default_scalar_mode_supported_p (mode);
1194 /* Return true if the back end supports vector mode MODE. */
1195 static bool
1196 s390_vector_mode_supported_p (machine_mode mode)
1198 machine_mode inner;
1200 if (!VECTOR_MODE_P (mode)
1201 || !TARGET_VX
1202 || GET_MODE_SIZE (mode) > 16)
1203 return false;
1205 inner = GET_MODE_INNER (mode);
1207 switch (inner)
1209 case QImode:
1210 case HImode:
1211 case SImode:
1212 case DImode:
1213 case TImode:
1214 case SFmode:
1215 case DFmode:
1216 case TFmode:
1217 return true;
1218 default:
1219 return false;
1223 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1225 void
1226 s390_set_has_landing_pad_p (bool value)
1228 cfun->machine->has_landing_pad_p = value;
1231 /* If two condition code modes are compatible, return a condition code
1232 mode which is compatible with both. Otherwise, return
1233 VOIDmode. */
1235 static machine_mode
1236 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1238 if (m1 == m2)
1239 return m1;
1241 switch (m1)
1243 case CCZmode:
1244 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1245 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1246 return m2;
1247 return VOIDmode;
1249 case CCSmode:
1250 case CCUmode:
1251 case CCTmode:
1252 case CCSRmode:
1253 case CCURmode:
1254 case CCZ1mode:
1255 if (m2 == CCZmode)
1256 return m1;
1258 return VOIDmode;
1260 default:
1261 return VOIDmode;
1263 return VOIDmode;
1266 /* Return true if SET either doesn't set the CC register, or else
1267 the source and destination have matching CC modes and that
1268 CC mode is at least as constrained as REQ_MODE. */
1270 static bool
1271 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1273 machine_mode set_mode;
1275 gcc_assert (GET_CODE (set) == SET);
1277 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1278 return 1;
1280 set_mode = GET_MODE (SET_DEST (set));
1281 switch (set_mode)
1283 case CCSmode:
1284 case CCSRmode:
1285 case CCUmode:
1286 case CCURmode:
1287 case CCLmode:
1288 case CCL1mode:
1289 case CCL2mode:
1290 case CCL3mode:
1291 case CCT1mode:
1292 case CCT2mode:
1293 case CCT3mode:
1294 case CCVEQmode:
1295 case CCVHmode:
1296 case CCVHUmode:
1297 case CCVFHmode:
1298 case CCVFHEmode:
1299 if (req_mode != set_mode)
1300 return 0;
1301 break;
1303 case CCZmode:
1304 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1305 && req_mode != CCSRmode && req_mode != CCURmode)
1306 return 0;
1307 break;
1309 case CCAPmode:
1310 case CCANmode:
1311 if (req_mode != CCAmode)
1312 return 0;
1313 break;
1315 default:
1316 gcc_unreachable ();
1319 return (GET_MODE (SET_SRC (set)) == set_mode);
1322 /* Return true if every SET in INSN that sets the CC register
1323 has source and destination with matching CC modes and that
1324 CC mode is at least as constrained as REQ_MODE.
1325 If REQ_MODE is VOIDmode, always return false. */
1327 bool
1328 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1330 int i;
1332 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1333 if (req_mode == VOIDmode)
1334 return false;
1336 if (GET_CODE (PATTERN (insn)) == SET)
1337 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1339 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1340 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1342 rtx set = XVECEXP (PATTERN (insn), 0, i);
1343 if (GET_CODE (set) == SET)
1344 if (!s390_match_ccmode_set (set, req_mode))
1345 return false;
1348 return true;
1351 /* If a test-under-mask instruction can be used to implement
1352 (compare (and ... OP1) OP2), return the CC mode required
1353 to do that. Otherwise, return VOIDmode.
1354 MIXED is true if the instruction can distinguish between
1355 CC1 and CC2 for mixed selected bits (TMxx), it is false
1356 if the instruction cannot (TM). */
1358 machine_mode
1359 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1361 int bit0, bit1;
1363 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1364 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1365 return VOIDmode;
1367 /* Selected bits all zero: CC0.
1368 e.g.: int a; if ((a & (16 + 128)) == 0) */
1369 if (INTVAL (op2) == 0)
1370 return CCTmode;
1372 /* Selected bits all one: CC3.
1373 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1374 if (INTVAL (op2) == INTVAL (op1))
1375 return CCT3mode;
1377 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1378 int a;
1379 if ((a & (16 + 128)) == 16) -> CCT1
1380 if ((a & (16 + 128)) == 128) -> CCT2 */
1381 if (mixed)
1383 bit1 = exact_log2 (INTVAL (op2));
1384 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1385 if (bit0 != -1 && bit1 != -1)
1386 return bit0 > bit1 ? CCT1mode : CCT2mode;
1389 return VOIDmode;
1392 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1393 OP0 and OP1 of a COMPARE, return the mode to be used for the
1394 comparison. */
1396 machine_mode
1397 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1399 if (TARGET_VX
1400 && register_operand (op0, DFmode)
1401 && register_operand (op1, DFmode))
1403 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1404 s390_emit_compare or s390_canonicalize_comparison will take
1405 care of it. */
1406 switch (code)
1408 case EQ:
1409 case NE:
1410 return CCVEQmode;
1411 case GT:
1412 case UNLE:
1413 return CCVFHmode;
1414 case GE:
1415 case UNLT:
1416 return CCVFHEmode;
1417 default:
1422 switch (code)
1424 case EQ:
1425 case NE:
1426 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1427 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1428 return CCAPmode;
1429 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1430 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1431 return CCAPmode;
1432 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1433 || GET_CODE (op1) == NEG)
1434 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1435 return CCLmode;
1437 if (GET_CODE (op0) == AND)
1439 /* Check whether we can potentially do it via TM. */
1440 machine_mode ccmode;
1441 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1442 if (ccmode != VOIDmode)
1444 /* Relax CCTmode to CCZmode to allow fall-back to AND
1445 if that turns out to be beneficial. */
1446 return ccmode == CCTmode ? CCZmode : ccmode;
1450 if (register_operand (op0, HImode)
1451 && GET_CODE (op1) == CONST_INT
1452 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1453 return CCT3mode;
1454 if (register_operand (op0, QImode)
1455 && GET_CODE (op1) == CONST_INT
1456 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1457 return CCT3mode;
1459 return CCZmode;
1461 case LE:
1462 case LT:
1463 case GE:
1464 case GT:
1465 /* The only overflow condition of NEG and ABS happens when
1466 -INT_MAX is used as parameter, which stays negative. So
1467 we have an overflow from a positive value to a negative.
1468 Using CCAP mode the resulting cc can be used for comparisons. */
1469 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1470 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1471 return CCAPmode;
1473 /* If constants are involved in an add instruction it is possible to use
1474 the resulting cc for comparisons with zero. Knowing the sign of the
1475 constant the overflow behavior gets predictable. e.g.:
1476 int a, b; if ((b = a + c) > 0)
1477 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1478 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1479 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1480 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1481 /* Avoid INT32_MIN on 32 bit. */
1482 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1484 if (INTVAL (XEXP((op0), 1)) < 0)
1485 return CCANmode;
1486 else
1487 return CCAPmode;
1489 /* Fall through. */
1490 case UNORDERED:
1491 case ORDERED:
1492 case UNEQ:
1493 case UNLE:
1494 case UNLT:
1495 case UNGE:
1496 case UNGT:
1497 case LTGT:
1498 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1499 && GET_CODE (op1) != CONST_INT)
1500 return CCSRmode;
1501 return CCSmode;
1503 case LTU:
1504 case GEU:
1505 if (GET_CODE (op0) == PLUS
1506 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1507 return CCL1mode;
1509 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1510 && GET_CODE (op1) != CONST_INT)
1511 return CCURmode;
1512 return CCUmode;
1514 case LEU:
1515 case GTU:
1516 if (GET_CODE (op0) == MINUS
1517 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1518 return CCL2mode;
1520 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1521 && GET_CODE (op1) != CONST_INT)
1522 return CCURmode;
1523 return CCUmode;
1525 default:
1526 gcc_unreachable ();
1530 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1531 that we can implement more efficiently. */
1533 static void
1534 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1535 bool op0_preserve_value)
1537 if (op0_preserve_value)
1538 return;
1540 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1541 if ((*code == EQ || *code == NE)
1542 && *op1 == const0_rtx
1543 && GET_CODE (*op0) == ZERO_EXTRACT
1544 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1545 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1546 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1548 rtx inner = XEXP (*op0, 0);
1549 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1550 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1551 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1553 if (len > 0 && len < modesize
1554 && pos >= 0 && pos + len <= modesize
1555 && modesize <= HOST_BITS_PER_WIDE_INT)
1557 unsigned HOST_WIDE_INT block;
1558 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1559 block <<= modesize - pos - len;
1561 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1562 gen_int_mode (block, GET_MODE (inner)));
1566 /* Narrow AND of memory against immediate to enable TM. */
1567 if ((*code == EQ || *code == NE)
1568 && *op1 == const0_rtx
1569 && GET_CODE (*op0) == AND
1570 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1571 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1573 rtx inner = XEXP (*op0, 0);
1574 rtx mask = XEXP (*op0, 1);
1576 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1577 if (GET_CODE (inner) == SUBREG
1578 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1579 && (GET_MODE_SIZE (GET_MODE (inner))
1580 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1581 && ((INTVAL (mask)
1582 & GET_MODE_MASK (GET_MODE (inner))
1583 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1584 == 0))
1585 inner = SUBREG_REG (inner);
1587 /* Do not change volatile MEMs. */
1588 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1590 int part = s390_single_part (XEXP (*op0, 1),
1591 GET_MODE (inner), QImode, 0);
1592 if (part >= 0)
1594 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1595 inner = adjust_address_nv (inner, QImode, part);
1596 *op0 = gen_rtx_AND (QImode, inner, mask);
1601 /* Narrow comparisons against 0xffff to HImode if possible. */
1602 if ((*code == EQ || *code == NE)
1603 && GET_CODE (*op1) == CONST_INT
1604 && INTVAL (*op1) == 0xffff
1605 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1606 && (nonzero_bits (*op0, GET_MODE (*op0))
1607 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1609 *op0 = gen_lowpart (HImode, *op0);
1610 *op1 = constm1_rtx;
1613 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1614 if (GET_CODE (*op0) == UNSPEC
1615 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1616 && XVECLEN (*op0, 0) == 1
1617 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1618 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1619 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1620 && *op1 == const0_rtx)
1622 enum rtx_code new_code = UNKNOWN;
1623 switch (*code)
1625 case EQ: new_code = EQ; break;
1626 case NE: new_code = NE; break;
1627 case LT: new_code = GTU; break;
1628 case GT: new_code = LTU; break;
1629 case LE: new_code = GEU; break;
1630 case GE: new_code = LEU; break;
1631 default: break;
1634 if (new_code != UNKNOWN)
1636 *op0 = XVECEXP (*op0, 0, 0);
1637 *code = new_code;
1641 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1642 if (GET_CODE (*op0) == UNSPEC
1643 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1644 && XVECLEN (*op0, 0) == 1
1645 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1646 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1647 && CONST_INT_P (*op1))
1649 enum rtx_code new_code = UNKNOWN;
1650 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1652 case CCZmode:
1653 case CCRAWmode:
1654 switch (*code)
1656 case EQ: new_code = EQ; break;
1657 case NE: new_code = NE; break;
1658 default: break;
1660 break;
1661 default: break;
1664 if (new_code != UNKNOWN)
1666 /* For CCRAWmode put the required cc mask into the second
1667 operand. */
1668 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1669 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1670 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1671 *op0 = XVECEXP (*op0, 0, 0);
1672 *code = new_code;
1676 /* Simplify cascaded EQ, NE with const0_rtx. */
1677 if ((*code == NE || *code == EQ)
1678 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1679 && GET_MODE (*op0) == SImode
1680 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1681 && REG_P (XEXP (*op0, 0))
1682 && XEXP (*op0, 1) == const0_rtx
1683 && *op1 == const0_rtx)
1685 if ((*code == EQ && GET_CODE (*op0) == NE)
1686 || (*code == NE && GET_CODE (*op0) == EQ))
1687 *code = EQ;
1688 else
1689 *code = NE;
1690 *op0 = XEXP (*op0, 0);
1693 /* Prefer register over memory as first operand. */
1694 if (MEM_P (*op0) && REG_P (*op1))
1696 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1697 *code = (int)swap_condition ((enum rtx_code)*code);
1700 /* Using the scalar variants of vector instructions for 64 bit FP
1701 comparisons might require swapping the operands. */
1702 if (TARGET_VX
1703 && register_operand (*op0, DFmode)
1704 && register_operand (*op1, DFmode)
1705 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1707 rtx tmp;
1709 switch (*code)
1711 case LT: *code = GT; break;
1712 case LE: *code = GE; break;
1713 case UNGT: *code = UNLE; break;
1714 case UNGE: *code = UNLT; break;
1715 default: ;
1717 tmp = *op0; *op0 = *op1; *op1 = tmp;
1721 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1722 FP compare using the single element variant of vector instructions.
1723 Replace CODE with the comparison code to be used in the CC reg
1724 compare and return the condition code register RTX in CC. */
1726 static bool
1727 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1728 rtx *cc)
1730 machine_mode cmp_mode;
1731 bool swap_p = false;
1733 switch (*code)
1735 case EQ: cmp_mode = CCVEQmode; break;
1736 case NE: cmp_mode = CCVEQmode; break;
1737 case GT: cmp_mode = CCVFHmode; break;
1738 case GE: cmp_mode = CCVFHEmode; break;
1739 case UNLE: cmp_mode = CCVFHmode; break;
1740 case UNLT: cmp_mode = CCVFHEmode; break;
1741 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1742 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1743 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1744 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1745 default: return false;
1748 if (swap_p)
1750 rtx tmp = cmp2;
1751 cmp2 = cmp1;
1752 cmp1 = tmp;
1754 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1755 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1756 gen_rtvec (2,
1757 gen_rtx_SET (*cc,
1758 gen_rtx_COMPARE (cmp_mode, cmp1,
1759 cmp2)),
1760 gen_rtx_CLOBBER (VOIDmode,
1761 gen_rtx_SCRATCH (V2DImode)))));
1762 return true;
1766 /* Emit a compare instruction suitable to implement the comparison
1767 OP0 CODE OP1. Return the correct condition RTL to be placed in
1768 the IF_THEN_ELSE of the conditional branch testing the result. */
1771 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1773 machine_mode mode = s390_select_ccmode (code, op0, op1);
1774 rtx cc;
1776 if (TARGET_VX
1777 && register_operand (op0, DFmode)
1778 && register_operand (op1, DFmode)
1779 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1781 /* Work has been done by s390_expand_vec_compare_scalar already. */
1783 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1785 /* Do not output a redundant compare instruction if a
1786 compare_and_swap pattern already computed the result and the
1787 machine modes are compatible. */
1788 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1789 == GET_MODE (op0));
1790 cc = op0;
1792 else
1794 cc = gen_rtx_REG (mode, CC_REGNUM);
1795 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1798 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1801 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1802 matches CMP.
1803 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1804 conditional branch testing the result. */
1806 static rtx
1807 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1808 rtx cmp, rtx new_rtx)
1810 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1811 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1812 const0_rtx);
1815 /* Emit a jump instruction to TARGET and return it. If COND is
1816 NULL_RTX, emit an unconditional jump, else a conditional jump under
1817 condition COND. */
1819 rtx_insn *
1820 s390_emit_jump (rtx target, rtx cond)
1822 rtx insn;
1824 target = gen_rtx_LABEL_REF (VOIDmode, target);
1825 if (cond)
1826 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1828 insn = gen_rtx_SET (pc_rtx, target);
1829 return emit_jump_insn (insn);
1832 /* Return branch condition mask to implement a branch
1833 specified by CODE. Return -1 for invalid comparisons. */
1836 s390_branch_condition_mask (rtx code)
1838 const int CC0 = 1 << 3;
1839 const int CC1 = 1 << 2;
1840 const int CC2 = 1 << 1;
1841 const int CC3 = 1 << 0;
1843 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1844 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1845 gcc_assert (XEXP (code, 1) == const0_rtx
1846 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1847 && CONST_INT_P (XEXP (code, 1))));
1850 switch (GET_MODE (XEXP (code, 0)))
1852 case CCZmode:
1853 case CCZ1mode:
1854 switch (GET_CODE (code))
1856 case EQ: return CC0;
1857 case NE: return CC1 | CC2 | CC3;
1858 default: return -1;
1860 break;
1862 case CCT1mode:
1863 switch (GET_CODE (code))
1865 case EQ: return CC1;
1866 case NE: return CC0 | CC2 | CC3;
1867 default: return -1;
1869 break;
1871 case CCT2mode:
1872 switch (GET_CODE (code))
1874 case EQ: return CC2;
1875 case NE: return CC0 | CC1 | CC3;
1876 default: return -1;
1878 break;
1880 case CCT3mode:
1881 switch (GET_CODE (code))
1883 case EQ: return CC3;
1884 case NE: return CC0 | CC1 | CC2;
1885 default: return -1;
1887 break;
1889 case CCLmode:
1890 switch (GET_CODE (code))
1892 case EQ: return CC0 | CC2;
1893 case NE: return CC1 | CC3;
1894 default: return -1;
1896 break;
1898 case CCL1mode:
1899 switch (GET_CODE (code))
1901 case LTU: return CC2 | CC3; /* carry */
1902 case GEU: return CC0 | CC1; /* no carry */
1903 default: return -1;
1905 break;
1907 case CCL2mode:
1908 switch (GET_CODE (code))
1910 case GTU: return CC0 | CC1; /* borrow */
1911 case LEU: return CC2 | CC3; /* no borrow */
1912 default: return -1;
1914 break;
1916 case CCL3mode:
1917 switch (GET_CODE (code))
1919 case EQ: return CC0 | CC2;
1920 case NE: return CC1 | CC3;
1921 case LTU: return CC1;
1922 case GTU: return CC3;
1923 case LEU: return CC1 | CC2;
1924 case GEU: return CC2 | CC3;
1925 default: return -1;
1928 case CCUmode:
1929 switch (GET_CODE (code))
1931 case EQ: return CC0;
1932 case NE: return CC1 | CC2 | CC3;
1933 case LTU: return CC1;
1934 case GTU: return CC2;
1935 case LEU: return CC0 | CC1;
1936 case GEU: return CC0 | CC2;
1937 default: return -1;
1939 break;
1941 case CCURmode:
1942 switch (GET_CODE (code))
1944 case EQ: return CC0;
1945 case NE: return CC2 | CC1 | CC3;
1946 case LTU: return CC2;
1947 case GTU: return CC1;
1948 case LEU: return CC0 | CC2;
1949 case GEU: return CC0 | CC1;
1950 default: return -1;
1952 break;
1954 case CCAPmode:
1955 switch (GET_CODE (code))
1957 case EQ: return CC0;
1958 case NE: return CC1 | CC2 | CC3;
1959 case LT: return CC1 | CC3;
1960 case GT: return CC2;
1961 case LE: return CC0 | CC1 | CC3;
1962 case GE: return CC0 | CC2;
1963 default: return -1;
1965 break;
1967 case CCANmode:
1968 switch (GET_CODE (code))
1970 case EQ: return CC0;
1971 case NE: return CC1 | CC2 | CC3;
1972 case LT: return CC1;
1973 case GT: return CC2 | CC3;
1974 case LE: return CC0 | CC1;
1975 case GE: return CC0 | CC2 | CC3;
1976 default: return -1;
1978 break;
1980 case CCSmode:
1981 switch (GET_CODE (code))
1983 case EQ: return CC0;
1984 case NE: return CC1 | CC2 | CC3;
1985 case LT: return CC1;
1986 case GT: return CC2;
1987 case LE: return CC0 | CC1;
1988 case GE: return CC0 | CC2;
1989 case UNORDERED: return CC3;
1990 case ORDERED: return CC0 | CC1 | CC2;
1991 case UNEQ: return CC0 | CC3;
1992 case UNLT: return CC1 | CC3;
1993 case UNGT: return CC2 | CC3;
1994 case UNLE: return CC0 | CC1 | CC3;
1995 case UNGE: return CC0 | CC2 | CC3;
1996 case LTGT: return CC1 | CC2;
1997 default: return -1;
1999 break;
2001 case CCSRmode:
2002 switch (GET_CODE (code))
2004 case EQ: return CC0;
2005 case NE: return CC2 | CC1 | CC3;
2006 case LT: return CC2;
2007 case GT: return CC1;
2008 case LE: return CC0 | CC2;
2009 case GE: return CC0 | CC1;
2010 case UNORDERED: return CC3;
2011 case ORDERED: return CC0 | CC2 | CC1;
2012 case UNEQ: return CC0 | CC3;
2013 case UNLT: return CC2 | CC3;
2014 case UNGT: return CC1 | CC3;
2015 case UNLE: return CC0 | CC2 | CC3;
2016 case UNGE: return CC0 | CC1 | CC3;
2017 case LTGT: return CC2 | CC1;
2018 default: return -1;
2020 break;
2022 /* Vector comparison modes. */
2024 case CCVEQmode:
2025 switch (GET_CODE (code))
2027 case EQ: return CC0;
2028 case NE: return CC3;
2029 default: return -1;
2032 case CCVEQANYmode:
2033 switch (GET_CODE (code))
2035 case EQ: return CC0 | CC1;
2036 case NE: return CC3 | CC1;
2037 default: return -1;
2040 /* Integer vector compare modes. */
2042 case CCVHmode:
2043 switch (GET_CODE (code))
2045 case GT: return CC0;
2046 case LE: return CC3;
2047 default: return -1;
2050 case CCVHANYmode:
2051 switch (GET_CODE (code))
2053 case GT: return CC0 | CC1;
2054 case LE: return CC3 | CC1;
2055 default: return -1;
2058 case CCVHUmode:
2059 switch (GET_CODE (code))
2061 case GTU: return CC0;
2062 case LEU: return CC3;
2063 default: return -1;
2066 case CCVHUANYmode:
2067 switch (GET_CODE (code))
2069 case GTU: return CC0 | CC1;
2070 case LEU: return CC3 | CC1;
2071 default: return -1;
2074 /* FP vector compare modes. */
2076 case CCVFHmode:
2077 switch (GET_CODE (code))
2079 case GT: return CC0;
2080 case UNLE: return CC3;
2081 default: return -1;
2084 case CCVFHANYmode:
2085 switch (GET_CODE (code))
2087 case GT: return CC0 | CC1;
2088 case UNLE: return CC3 | CC1;
2089 default: return -1;
2092 case CCVFHEmode:
2093 switch (GET_CODE (code))
2095 case GE: return CC0;
2096 case UNLT: return CC3;
2097 default: return -1;
2100 case CCVFHEANYmode:
2101 switch (GET_CODE (code))
2103 case GE: return CC0 | CC1;
2104 case UNLT: return CC3 | CC1;
2105 default: return -1;
2109 case CCRAWmode:
2110 switch (GET_CODE (code))
2112 case EQ:
2113 return INTVAL (XEXP (code, 1));
2114 case NE:
2115 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2116 default:
2117 gcc_unreachable ();
2120 default:
2121 return -1;
2126 /* Return branch condition mask to implement a compare and branch
2127 specified by CODE. Return -1 for invalid comparisons. */
2130 s390_compare_and_branch_condition_mask (rtx code)
2132 const int CC0 = 1 << 3;
2133 const int CC1 = 1 << 2;
2134 const int CC2 = 1 << 1;
2136 switch (GET_CODE (code))
2138 case EQ:
2139 return CC0;
2140 case NE:
2141 return CC1 | CC2;
2142 case LT:
2143 case LTU:
2144 return CC1;
2145 case GT:
2146 case GTU:
2147 return CC2;
2148 case LE:
2149 case LEU:
2150 return CC0 | CC1;
2151 case GE:
2152 case GEU:
2153 return CC0 | CC2;
2154 default:
2155 gcc_unreachable ();
2157 return -1;
2160 /* If INV is false, return assembler mnemonic string to implement
2161 a branch specified by CODE. If INV is true, return mnemonic
2162 for the corresponding inverted branch. */
2164 static const char *
2165 s390_branch_condition_mnemonic (rtx code, int inv)
2167 int mask;
2169 static const char *const mnemonic[16] =
2171 NULL, "o", "h", "nle",
2172 "l", "nhe", "lh", "ne",
2173 "e", "nlh", "he", "nl",
2174 "le", "nh", "no", NULL
2177 if (GET_CODE (XEXP (code, 0)) == REG
2178 && REGNO (XEXP (code, 0)) == CC_REGNUM
2179 && (XEXP (code, 1) == const0_rtx
2180 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2181 && CONST_INT_P (XEXP (code, 1)))))
2182 mask = s390_branch_condition_mask (code);
2183 else
2184 mask = s390_compare_and_branch_condition_mask (code);
2186 gcc_assert (mask >= 0);
2188 if (inv)
2189 mask ^= 15;
2191 gcc_assert (mask >= 1 && mask <= 14);
2193 return mnemonic[mask];
2196 /* Return the part of op which has a value different from def.
2197 The size of the part is determined by mode.
2198 Use this function only if you already know that op really
2199 contains such a part. */
2201 unsigned HOST_WIDE_INT
2202 s390_extract_part (rtx op, machine_mode mode, int def)
2204 unsigned HOST_WIDE_INT value = 0;
2205 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2206 int part_bits = GET_MODE_BITSIZE (mode);
2207 unsigned HOST_WIDE_INT part_mask
2208 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2209 int i;
2211 for (i = 0; i < max_parts; i++)
2213 if (i == 0)
2214 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2215 else
2216 value >>= part_bits;
2218 if ((value & part_mask) != (def & part_mask))
2219 return value & part_mask;
2222 gcc_unreachable ();
2225 /* If OP is an integer constant of mode MODE with exactly one
2226 part of mode PART_MODE unequal to DEF, return the number of that
2227 part. Otherwise, return -1. */
2230 s390_single_part (rtx op,
2231 machine_mode mode,
2232 machine_mode part_mode,
2233 int def)
2235 unsigned HOST_WIDE_INT value = 0;
2236 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2237 unsigned HOST_WIDE_INT part_mask
2238 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2239 int i, part = -1;
2241 if (GET_CODE (op) != CONST_INT)
2242 return -1;
2244 for (i = 0; i < n_parts; i++)
2246 if (i == 0)
2247 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2248 else
2249 value >>= GET_MODE_BITSIZE (part_mode);
2251 if ((value & part_mask) != (def & part_mask))
2253 if (part != -1)
2254 return -1;
2255 else
2256 part = i;
2259 return part == -1 ? -1 : n_parts - 1 - part;
2262 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2263 bits and no other bits are set in (the lower SIZE bits of) IN.
2265 PSTART and PEND can be used to obtain the start and end
2266 position (inclusive) of the bitfield relative to 64
2267 bits. *PSTART / *PEND gives the position of the first/last bit
2268 of the bitfield counting from the highest order bit starting
2269 with zero. */
2271 bool
2272 s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2273 int *pstart, int *pend)
2275 int start;
2276 int end = -1;
2277 int lowbit = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - 1;
2278 int highbit = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - size;
2279 unsigned HOST_WIDE_INT bitmask = 1ULL;
2281 gcc_assert (!!pstart == !!pend);
2282 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2283 if (end == -1)
2285 /* Look for the rightmost bit of a contiguous range of ones. */
2286 if (bitmask & in)
2287 /* Found it. */
2288 end = start;
2290 else
2292 /* Look for the firt zero bit after the range of ones. */
2293 if (! (bitmask & in))
2294 /* Found it. */
2295 break;
2297 /* We're one past the last one-bit. */
2298 start++;
2300 if (end == -1)
2301 /* No one bits found. */
2302 return false;
2304 if (start > highbit)
2306 unsigned HOST_WIDE_INT mask;
2308 /* Calculate a mask for all bits beyond the contiguous bits. */
2309 mask = ((~(0ULL) >> highbit) & (~(0ULL) << (lowbit - start + 1)));
2310 if (mask & in)
2311 /* There are more bits set beyond the first range of one bits. */
2312 return false;
2315 if (pstart)
2317 *pstart = start;
2318 *pend = end;
2321 return true;
2324 /* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2325 if ~IN contains a contiguous bitfield. In that case, *END is <
2326 *START.
2328 If WRAP_P is true, a bitmask that wraps around is also tested.
2329 When a wraparoud occurs *START is greater than *END (in
2330 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2331 part of the range. If WRAP_P is false, no wraparound is
2332 tested. */
2334 bool
2335 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2336 int size, int *start, int *end)
2338 int bs = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT;
2339 bool b;
2341 gcc_assert (!!start == !!end);
2342 if ((in & ((~(0ULL)) >> (bs - size))) == 0)
2343 /* This cannot be expressed as a contiguous bitmask. Exit early because
2344 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2345 a valid bitmask. */
2346 return false;
2347 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2348 if (b)
2349 return true;
2350 if (! wrap_p)
2351 return false;
2352 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2353 if (b && start)
2355 int s = *start;
2356 int e = *end;
2358 gcc_assert (s >= 1);
2359 *start = ((e + 1) & (bs - 1));
2360 *end = ((s - 1 + bs) & (bs - 1));
2363 return b;
2366 /* Return true if OP contains the same contiguous bitfield in *all*
2367 its elements. START and END can be used to obtain the start and
2368 end position of the bitfield.
2370 START/STOP give the position of the first/last bit of the bitfield
2371 counting from the lowest order bit starting with zero. In order to
2372 use these values for S/390 instructions this has to be converted to
2373 "bits big endian" style. */
2375 bool
2376 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2378 unsigned HOST_WIDE_INT mask;
2379 int size;
2380 rtx elt;
2381 bool b;
2383 gcc_assert (!!start == !!end);
2384 if (!const_vec_duplicate_p (op, &elt)
2385 || !CONST_INT_P (elt))
2386 return false;
2388 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2390 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2391 if (size > 64)
2392 return false;
2394 mask = UINTVAL (elt);
2396 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2397 if (b)
2399 if (start)
2401 int bs = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT;
2403 *start -= (bs - size);
2404 *end -= (bs - size);
2406 return true;
2408 else
2409 return false;
2412 /* Return true if C consists only of byte chunks being either 0 or
2413 0xff. If MASK is !=NULL a byte mask is generated which is
2414 appropriate for the vector generate byte mask instruction. */
2416 bool
2417 s390_bytemask_vector_p (rtx op, unsigned *mask)
2419 int i;
2420 unsigned tmp_mask = 0;
2421 int nunit, unit_size;
2423 if (!VECTOR_MODE_P (GET_MODE (op))
2424 || GET_CODE (op) != CONST_VECTOR
2425 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2426 return false;
2428 nunit = GET_MODE_NUNITS (GET_MODE (op));
2429 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2431 for (i = 0; i < nunit; i++)
2433 unsigned HOST_WIDE_INT c;
2434 int j;
2436 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2437 return false;
2439 c = UINTVAL (XVECEXP (op, 0, i));
2440 for (j = 0; j < unit_size; j++)
2442 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2443 return false;
2444 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2445 c = c >> BITS_PER_UNIT;
2449 if (mask != NULL)
2450 *mask = tmp_mask;
2452 return true;
2455 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2456 equivalent to a shift followed by the AND. In particular, CONTIG
2457 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2458 for ROTL indicate a rotate to the right. */
2460 bool
2461 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2463 int start, end;
2464 bool ok;
2466 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2467 gcc_assert (ok);
2469 if (rotl >= 0)
2470 return (64 - end >= rotl);
2471 else
2473 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2474 DIMode. */
2475 rotl = -rotl + (64 - bitsize);
2476 return (start >= rotl);
2480 /* Check whether we can (and want to) split a double-word
2481 move in mode MODE from SRC to DST into two single-word
2482 moves, moving the subword FIRST_SUBWORD first. */
2484 bool
2485 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2487 /* Floating point and vector registers cannot be split. */
2488 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2489 return false;
2491 /* We don't need to split if operands are directly accessible. */
2492 if (s_operand (src, mode) || s_operand (dst, mode))
2493 return false;
2495 /* Non-offsettable memory references cannot be split. */
2496 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2497 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2498 return false;
2500 /* Moving the first subword must not clobber a register
2501 needed to move the second subword. */
2502 if (register_operand (dst, mode))
2504 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2505 if (reg_overlap_mentioned_p (subreg, src))
2506 return false;
2509 return true;
2512 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2513 and [MEM2, MEM2 + SIZE] do overlap and false
2514 otherwise. */
2516 bool
2517 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2519 rtx addr1, addr2, addr_delta;
2520 HOST_WIDE_INT delta;
2522 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2523 return true;
2525 if (size == 0)
2526 return false;
2528 addr1 = XEXP (mem1, 0);
2529 addr2 = XEXP (mem2, 0);
2531 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2533 /* This overlapping check is used by peepholes merging memory block operations.
2534 Overlapping operations would otherwise be recognized by the S/390 hardware
2535 and would fall back to a slower implementation. Allowing overlapping
2536 operations would lead to slow code but not to wrong code. Therefore we are
2537 somewhat optimistic if we cannot prove that the memory blocks are
2538 overlapping.
2539 That's why we return false here although this may accept operations on
2540 overlapping memory areas. */
2541 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2542 return false;
2544 delta = INTVAL (addr_delta);
2546 if (delta == 0
2547 || (delta > 0 && delta < size)
2548 || (delta < 0 && -delta < size))
2549 return true;
2551 return false;
2554 /* Check whether the address of memory reference MEM2 equals exactly
2555 the address of memory reference MEM1 plus DELTA. Return true if
2556 we can prove this to be the case, false otherwise. */
2558 bool
2559 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2561 rtx addr1, addr2, addr_delta;
2563 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2564 return false;
2566 addr1 = XEXP (mem1, 0);
2567 addr2 = XEXP (mem2, 0);
2569 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2570 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2571 return false;
2573 return true;
2576 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2578 void
2579 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2580 rtx *operands)
2582 machine_mode wmode = mode;
2583 rtx dst = operands[0];
2584 rtx src1 = operands[1];
2585 rtx src2 = operands[2];
2586 rtx op, clob, tem;
2588 /* If we cannot handle the operation directly, use a temp register. */
2589 if (!s390_logical_operator_ok_p (operands))
2590 dst = gen_reg_rtx (mode);
2592 /* QImode and HImode patterns make sense only if we have a destination
2593 in memory. Otherwise perform the operation in SImode. */
2594 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2595 wmode = SImode;
2597 /* Widen operands if required. */
2598 if (mode != wmode)
2600 if (GET_CODE (dst) == SUBREG
2601 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2602 dst = tem;
2603 else if (REG_P (dst))
2604 dst = gen_rtx_SUBREG (wmode, dst, 0);
2605 else
2606 dst = gen_reg_rtx (wmode);
2608 if (GET_CODE (src1) == SUBREG
2609 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2610 src1 = tem;
2611 else if (GET_MODE (src1) != VOIDmode)
2612 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2614 if (GET_CODE (src2) == SUBREG
2615 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2616 src2 = tem;
2617 else if (GET_MODE (src2) != VOIDmode)
2618 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2621 /* Emit the instruction. */
2622 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2623 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2624 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2626 /* Fix up the destination if needed. */
2627 if (dst != operands[0])
2628 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2631 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2633 bool
2634 s390_logical_operator_ok_p (rtx *operands)
2636 /* If the destination operand is in memory, it needs to coincide
2637 with one of the source operands. After reload, it has to be
2638 the first source operand. */
2639 if (GET_CODE (operands[0]) == MEM)
2640 return rtx_equal_p (operands[0], operands[1])
2641 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2643 return true;
2646 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2647 operand IMMOP to switch from SS to SI type instructions. */
2649 void
2650 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2652 int def = code == AND ? -1 : 0;
2653 HOST_WIDE_INT mask;
2654 int part;
2656 gcc_assert (GET_CODE (*memop) == MEM);
2657 gcc_assert (!MEM_VOLATILE_P (*memop));
2659 mask = s390_extract_part (*immop, QImode, def);
2660 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2661 gcc_assert (part >= 0);
2663 *memop = adjust_address (*memop, QImode, part);
2664 *immop = gen_int_mode (mask, QImode);
2668 /* How to allocate a 'struct machine_function'. */
2670 static struct machine_function *
2671 s390_init_machine_status (void)
2673 return ggc_cleared_alloc<machine_function> ();
2676 /* Map for smallest class containing reg regno. */
2678 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2679 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2680 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2681 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2682 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2683 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2684 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2685 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2686 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2687 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2688 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2689 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2690 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2691 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2692 VEC_REGS, VEC_REGS /* 52 */
2695 /* Return attribute type of insn. */
2697 static enum attr_type
2698 s390_safe_attr_type (rtx_insn *insn)
2700 if (recog_memoized (insn) >= 0)
2701 return get_attr_type (insn);
2702 else
2703 return TYPE_NONE;
2706 /* Return true if DISP is a valid short displacement. */
2708 static bool
2709 s390_short_displacement (rtx disp)
2711 /* No displacement is OK. */
2712 if (!disp)
2713 return true;
2715 /* Without the long displacement facility we don't need to
2716 distingiush between long and short displacement. */
2717 if (!TARGET_LONG_DISPLACEMENT)
2718 return true;
2720 /* Integer displacement in range. */
2721 if (GET_CODE (disp) == CONST_INT)
2722 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2724 /* GOT offset is not OK, the GOT can be large. */
2725 if (GET_CODE (disp) == CONST
2726 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2727 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2728 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2729 return false;
2731 /* All other symbolic constants are literal pool references,
2732 which are OK as the literal pool must be small. */
2733 if (GET_CODE (disp) == CONST)
2734 return true;
2736 return false;
2739 /* Decompose a RTL expression ADDR for a memory address into
2740 its components, returned in OUT.
2742 Returns false if ADDR is not a valid memory address, true
2743 otherwise. If OUT is NULL, don't return the components,
2744 but check for validity only.
2746 Note: Only addresses in canonical form are recognized.
2747 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2748 canonical form so that they will be recognized. */
2750 static int
2751 s390_decompose_address (rtx addr, struct s390_address *out)
2753 HOST_WIDE_INT offset = 0;
2754 rtx base = NULL_RTX;
2755 rtx indx = NULL_RTX;
2756 rtx disp = NULL_RTX;
2757 rtx orig_disp;
2758 bool pointer = false;
2759 bool base_ptr = false;
2760 bool indx_ptr = false;
2761 bool literal_pool = false;
2763 /* We may need to substitute the literal pool base register into the address
2764 below. However, at this point we do not know which register is going to
2765 be used as base, so we substitute the arg pointer register. This is going
2766 to be treated as holding a pointer below -- it shouldn't be used for any
2767 other purpose. */
2768 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2770 /* Decompose address into base + index + displacement. */
2772 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2773 base = addr;
2775 else if (GET_CODE (addr) == PLUS)
2777 rtx op0 = XEXP (addr, 0);
2778 rtx op1 = XEXP (addr, 1);
2779 enum rtx_code code0 = GET_CODE (op0);
2780 enum rtx_code code1 = GET_CODE (op1);
2782 if (code0 == REG || code0 == UNSPEC)
2784 if (code1 == REG || code1 == UNSPEC)
2786 indx = op0; /* index + base */
2787 base = op1;
2790 else
2792 base = op0; /* base + displacement */
2793 disp = op1;
2797 else if (code0 == PLUS)
2799 indx = XEXP (op0, 0); /* index + base + disp */
2800 base = XEXP (op0, 1);
2801 disp = op1;
2804 else
2806 return false;
2810 else
2811 disp = addr; /* displacement */
2813 /* Extract integer part of displacement. */
2814 orig_disp = disp;
2815 if (disp)
2817 if (GET_CODE (disp) == CONST_INT)
2819 offset = INTVAL (disp);
2820 disp = NULL_RTX;
2822 else if (GET_CODE (disp) == CONST
2823 && GET_CODE (XEXP (disp, 0)) == PLUS
2824 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2826 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2827 disp = XEXP (XEXP (disp, 0), 0);
2831 /* Strip off CONST here to avoid special case tests later. */
2832 if (disp && GET_CODE (disp) == CONST)
2833 disp = XEXP (disp, 0);
2835 /* We can convert literal pool addresses to
2836 displacements by basing them off the base register. */
2837 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2839 /* Either base or index must be free to hold the base register. */
2840 if (!base)
2841 base = fake_pool_base, literal_pool = true;
2842 else if (!indx)
2843 indx = fake_pool_base, literal_pool = true;
2844 else
2845 return false;
2847 /* Mark up the displacement. */
2848 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2849 UNSPEC_LTREL_OFFSET);
2852 /* Validate base register. */
2853 if (base)
2855 if (GET_CODE (base) == UNSPEC)
2856 switch (XINT (base, 1))
2858 case UNSPEC_LTREF:
2859 if (!disp)
2860 disp = gen_rtx_UNSPEC (Pmode,
2861 gen_rtvec (1, XVECEXP (base, 0, 0)),
2862 UNSPEC_LTREL_OFFSET);
2863 else
2864 return false;
2866 base = XVECEXP (base, 0, 1);
2867 break;
2869 case UNSPEC_LTREL_BASE:
2870 if (XVECLEN (base, 0) == 1)
2871 base = fake_pool_base, literal_pool = true;
2872 else
2873 base = XVECEXP (base, 0, 1);
2874 break;
2876 default:
2877 return false;
2880 if (!REG_P (base) || GET_MODE (base) != Pmode)
2881 return false;
2883 if (REGNO (base) == STACK_POINTER_REGNUM
2884 || REGNO (base) == FRAME_POINTER_REGNUM
2885 || ((reload_completed || reload_in_progress)
2886 && frame_pointer_needed
2887 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2888 || REGNO (base) == ARG_POINTER_REGNUM
2889 || (flag_pic
2890 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2891 pointer = base_ptr = true;
2893 if ((reload_completed || reload_in_progress)
2894 && base == cfun->machine->base_reg)
2895 pointer = base_ptr = literal_pool = true;
2898 /* Validate index register. */
2899 if (indx)
2901 if (GET_CODE (indx) == UNSPEC)
2902 switch (XINT (indx, 1))
2904 case UNSPEC_LTREF:
2905 if (!disp)
2906 disp = gen_rtx_UNSPEC (Pmode,
2907 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2908 UNSPEC_LTREL_OFFSET);
2909 else
2910 return false;
2912 indx = XVECEXP (indx, 0, 1);
2913 break;
2915 case UNSPEC_LTREL_BASE:
2916 if (XVECLEN (indx, 0) == 1)
2917 indx = fake_pool_base, literal_pool = true;
2918 else
2919 indx = XVECEXP (indx, 0, 1);
2920 break;
2922 default:
2923 return false;
2926 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2927 return false;
2929 if (REGNO (indx) == STACK_POINTER_REGNUM
2930 || REGNO (indx) == FRAME_POINTER_REGNUM
2931 || ((reload_completed || reload_in_progress)
2932 && frame_pointer_needed
2933 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2934 || REGNO (indx) == ARG_POINTER_REGNUM
2935 || (flag_pic
2936 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2937 pointer = indx_ptr = true;
2939 if ((reload_completed || reload_in_progress)
2940 && indx == cfun->machine->base_reg)
2941 pointer = indx_ptr = literal_pool = true;
2944 /* Prefer to use pointer as base, not index. */
2945 if (base && indx && !base_ptr
2946 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2948 rtx tmp = base;
2949 base = indx;
2950 indx = tmp;
2953 /* Validate displacement. */
2954 if (!disp)
2956 /* If virtual registers are involved, the displacement will change later
2957 anyway as the virtual registers get eliminated. This could make a
2958 valid displacement invalid, but it is more likely to make an invalid
2959 displacement valid, because we sometimes access the register save area
2960 via negative offsets to one of those registers.
2961 Thus we don't check the displacement for validity here. If after
2962 elimination the displacement turns out to be invalid after all,
2963 this is fixed up by reload in any case. */
2964 /* LRA maintains always displacements up to date and we need to
2965 know the displacement is right during all LRA not only at the
2966 final elimination. */
2967 if (lra_in_progress
2968 || (base != arg_pointer_rtx
2969 && indx != arg_pointer_rtx
2970 && base != return_address_pointer_rtx
2971 && indx != return_address_pointer_rtx
2972 && base != frame_pointer_rtx
2973 && indx != frame_pointer_rtx
2974 && base != virtual_stack_vars_rtx
2975 && indx != virtual_stack_vars_rtx))
2976 if (!DISP_IN_RANGE (offset))
2977 return false;
2979 else
2981 /* All the special cases are pointers. */
2982 pointer = true;
2984 /* In the small-PIC case, the linker converts @GOT
2985 and @GOTNTPOFF offsets to possible displacements. */
2986 if (GET_CODE (disp) == UNSPEC
2987 && (XINT (disp, 1) == UNSPEC_GOT
2988 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2989 && flag_pic == 1)
2994 /* Accept pool label offsets. */
2995 else if (GET_CODE (disp) == UNSPEC
2996 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2999 /* Accept literal pool references. */
3000 else if (GET_CODE (disp) == UNSPEC
3001 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
3003 /* In case CSE pulled a non literal pool reference out of
3004 the pool we have to reject the address. This is
3005 especially important when loading the GOT pointer on non
3006 zarch CPUs. In this case the literal pool contains an lt
3007 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3008 will most likely exceed the displacement. */
3009 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3010 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3011 return false;
3013 orig_disp = gen_rtx_CONST (Pmode, disp);
3014 if (offset)
3016 /* If we have an offset, make sure it does not
3017 exceed the size of the constant pool entry. */
3018 rtx sym = XVECEXP (disp, 0, 0);
3019 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
3020 return false;
3022 orig_disp = plus_constant (Pmode, orig_disp, offset);
3026 else
3027 return false;
3030 if (!base && !indx)
3031 pointer = true;
3033 if (out)
3035 out->base = base;
3036 out->indx = indx;
3037 out->disp = orig_disp;
3038 out->pointer = pointer;
3039 out->literal_pool = literal_pool;
3042 return true;
3045 /* Decompose a RTL expression OP for an address style operand into its
3046 components, and return the base register in BASE and the offset in
3047 OFFSET. While OP looks like an address it is never supposed to be
3048 used as such.
3050 Return true if OP is a valid address operand, false if not. */
3052 bool
3053 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3054 HOST_WIDE_INT *offset)
3056 rtx off = NULL_RTX;
3058 /* We can have an integer constant, an address register,
3059 or a sum of the two. */
3060 if (CONST_SCALAR_INT_P (op))
3062 off = op;
3063 op = NULL_RTX;
3065 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3067 off = XEXP (op, 1);
3068 op = XEXP (op, 0);
3070 while (op && GET_CODE (op) == SUBREG)
3071 op = SUBREG_REG (op);
3073 if (op && GET_CODE (op) != REG)
3074 return false;
3076 if (offset)
3078 if (off == NULL_RTX)
3079 *offset = 0;
3080 else if (CONST_INT_P (off))
3081 *offset = INTVAL (off);
3082 else if (CONST_WIDE_INT_P (off))
3083 /* The offset will anyway be cut down to 12 bits so take just
3084 the lowest order chunk of the wide int. */
3085 *offset = CONST_WIDE_INT_ELT (off, 0);
3086 else
3087 gcc_unreachable ();
3089 if (base)
3090 *base = op;
3092 return true;
3096 /* Return true if CODE is a valid address without index. */
3098 bool
3099 s390_legitimate_address_without_index_p (rtx op)
3101 struct s390_address addr;
3103 if (!s390_decompose_address (XEXP (op, 0), &addr))
3104 return false;
3105 if (addr.indx)
3106 return false;
3108 return true;
3112 /* Return TRUE if ADDR is an operand valid for a load/store relative
3113 instruction. Be aware that the alignment of the operand needs to
3114 be checked separately.
3115 Valid addresses are single references or a sum of a reference and a
3116 constant integer. Return these parts in SYMREF and ADDEND. You can
3117 pass NULL in REF and/or ADDEND if you are not interested in these
3118 values. Literal pool references are *not* considered symbol
3119 references. */
3121 static bool
3122 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3124 HOST_WIDE_INT tmpaddend = 0;
3126 if (GET_CODE (addr) == CONST)
3127 addr = XEXP (addr, 0);
3129 if (GET_CODE (addr) == PLUS)
3131 if (!CONST_INT_P (XEXP (addr, 1)))
3132 return false;
3134 tmpaddend = INTVAL (XEXP (addr, 1));
3135 addr = XEXP (addr, 0);
3138 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3139 || (GET_CODE (addr) == UNSPEC
3140 && (XINT (addr, 1) == UNSPEC_GOTENT
3141 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3143 if (symref)
3144 *symref = addr;
3145 if (addend)
3146 *addend = tmpaddend;
3148 return true;
3150 return false;
3153 /* Return true if the address in OP is valid for constraint letter C
3154 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if it literal
3155 pool MEMs should be accepted. Only the Q, R, S, T constraint
3156 letters are allowed for C. */
3158 static int
3159 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3161 struct s390_address addr;
3162 bool decomposed = false;
3164 /* This check makes sure that no symbolic address (except literal
3165 pool references) are accepted by the R or T constraints. */
3166 if (s390_loadrelative_operand_p (op, NULL, NULL))
3167 return 0;
3169 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3170 if (!lit_pool_ok)
3172 if (!s390_decompose_address (op, &addr))
3173 return 0;
3174 if (addr.literal_pool)
3175 return 0;
3176 decomposed = true;
3179 /* With reload, we sometimes get intermediate address forms that are
3180 actually invalid as-is, but we need to accept them in the most
3181 generic cases below ('R' or 'T'), since reload will in fact fix
3182 them up. LRA behaves differently here; we never see such forms,
3183 but on the other hand, we need to strictly reject every invalid
3184 address form. Perform this check right up front. */
3185 if (lra_in_progress)
3187 if (!decomposed && !s390_decompose_address (op, &addr))
3188 return 0;
3189 decomposed = true;
3192 switch (c)
3194 case 'Q': /* no index short displacement */
3195 if (!decomposed && !s390_decompose_address (op, &addr))
3196 return 0;
3197 if (addr.indx)
3198 return 0;
3199 if (!s390_short_displacement (addr.disp))
3200 return 0;
3201 break;
3203 case 'R': /* with index short displacement */
3204 if (TARGET_LONG_DISPLACEMENT)
3206 if (!decomposed && !s390_decompose_address (op, &addr))
3207 return 0;
3208 if (!s390_short_displacement (addr.disp))
3209 return 0;
3211 /* Any invalid address here will be fixed up by reload,
3212 so accept it for the most generic constraint. */
3213 break;
3215 case 'S': /* no index long displacement */
3216 if (!decomposed && !s390_decompose_address (op, &addr))
3217 return 0;
3218 if (addr.indx)
3219 return 0;
3220 break;
3222 case 'T': /* with index long displacement */
3223 /* Any invalid address here will be fixed up by reload,
3224 so accept it for the most generic constraint. */
3225 break;
3227 default:
3228 return 0;
3230 return 1;
3234 /* Evaluates constraint strings described by the regular expression
3235 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3236 the constraint given in STR, or 0 else. */
3239 s390_mem_constraint (const char *str, rtx op)
3241 char c = str[0];
3243 switch (c)
3245 case 'A':
3246 /* Check for offsettable variants of memory constraints. */
3247 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3248 return 0;
3249 if ((reload_completed || reload_in_progress)
3250 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3251 return 0;
3252 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3253 case 'B':
3254 /* Check for non-literal-pool variants of memory constraints. */
3255 if (!MEM_P (op))
3256 return 0;
3257 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3258 case 'Q':
3259 case 'R':
3260 case 'S':
3261 case 'T':
3262 if (GET_CODE (op) != MEM)
3263 return 0;
3264 return s390_check_qrst_address (c, XEXP (op, 0), true);
3265 case 'Y':
3266 /* Simply check for the basic form of a shift count. Reload will
3267 take care of making sure we have a proper base register. */
3268 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3269 return 0;
3270 break;
3271 case 'Z':
3272 return s390_check_qrst_address (str[1], op, true);
3273 default:
3274 return 0;
3276 return 1;
3280 /* Evaluates constraint strings starting with letter O. Input
3281 parameter C is the second letter following the "O" in the constraint
3282 string. Returns 1 if VALUE meets the respective constraint and 0
3283 otherwise. */
3286 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3288 if (!TARGET_EXTIMM)
3289 return 0;
3291 switch (c)
3293 case 's':
3294 return trunc_int_for_mode (value, SImode) == value;
3296 case 'p':
3297 return value == 0
3298 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3300 case 'n':
3301 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3303 default:
3304 gcc_unreachable ();
3309 /* Evaluates constraint strings starting with letter N. Parameter STR
3310 contains the letters following letter "N" in the constraint string.
3311 Returns true if VALUE matches the constraint. */
3314 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3316 machine_mode mode, part_mode;
3317 int def;
3318 int part, part_goal;
3321 if (str[0] == 'x')
3322 part_goal = -1;
3323 else
3324 part_goal = str[0] - '0';
3326 switch (str[1])
3328 case 'Q':
3329 part_mode = QImode;
3330 break;
3331 case 'H':
3332 part_mode = HImode;
3333 break;
3334 case 'S':
3335 part_mode = SImode;
3336 break;
3337 default:
3338 return 0;
3341 switch (str[2])
3343 case 'H':
3344 mode = HImode;
3345 break;
3346 case 'S':
3347 mode = SImode;
3348 break;
3349 case 'D':
3350 mode = DImode;
3351 break;
3352 default:
3353 return 0;
3356 switch (str[3])
3358 case '0':
3359 def = 0;
3360 break;
3361 case 'F':
3362 def = -1;
3363 break;
3364 default:
3365 return 0;
3368 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3369 return 0;
3371 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3372 if (part < 0)
3373 return 0;
3374 if (part_goal != -1 && part_goal != part)
3375 return 0;
3377 return 1;
3381 /* Returns true if the input parameter VALUE is a float zero. */
3384 s390_float_const_zero_p (rtx value)
3386 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3387 && value == CONST0_RTX (GET_MODE (value)));
3390 /* Implement TARGET_REGISTER_MOVE_COST. */
3392 static int
3393 s390_register_move_cost (machine_mode mode,
3394 reg_class_t from, reg_class_t to)
3396 /* On s390, copy between fprs and gprs is expensive. */
3398 /* It becomes somewhat faster having ldgr/lgdr. */
3399 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3401 /* ldgr is single cycle. */
3402 if (reg_classes_intersect_p (from, GENERAL_REGS)
3403 && reg_classes_intersect_p (to, FP_REGS))
3404 return 1;
3405 /* lgdr needs 3 cycles. */
3406 if (reg_classes_intersect_p (to, GENERAL_REGS)
3407 && reg_classes_intersect_p (from, FP_REGS))
3408 return 3;
3411 /* Otherwise copying is done via memory. */
3412 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3413 && reg_classes_intersect_p (to, FP_REGS))
3414 || (reg_classes_intersect_p (from, FP_REGS)
3415 && reg_classes_intersect_p (to, GENERAL_REGS)))
3416 return 10;
3418 return 1;
3421 /* Implement TARGET_MEMORY_MOVE_COST. */
3423 static int
3424 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3425 reg_class_t rclass ATTRIBUTE_UNUSED,
3426 bool in ATTRIBUTE_UNUSED)
3428 return 2;
3431 /* Compute a (partial) cost for rtx X. Return true if the complete
3432 cost has been computed, and false if subexpressions should be
3433 scanned. In either case, *TOTAL contains the cost result. The
3434 initial value of *TOTAL is the default value computed by
3435 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3436 code of the superexpression of x. */
3438 static bool
3439 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3440 int opno ATTRIBUTE_UNUSED,
3441 int *total, bool speed ATTRIBUTE_UNUSED)
3443 int code = GET_CODE (x);
3444 switch (code)
3446 case CONST:
3447 case CONST_INT:
3448 case LABEL_REF:
3449 case SYMBOL_REF:
3450 case CONST_DOUBLE:
3451 case CONST_WIDE_INT:
3452 case MEM:
3453 *total = 0;
3454 return true;
3456 case IOR:
3457 /* risbg */
3458 if (GET_CODE (XEXP (x, 0)) == AND
3459 && GET_CODE (XEXP (x, 1)) == ASHIFT
3460 && REG_P (XEXP (XEXP (x, 0), 0))
3461 && REG_P (XEXP (XEXP (x, 1), 0))
3462 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3463 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3464 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3465 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3467 *total = COSTS_N_INSNS (2);
3468 return true;
3470 /* fallthrough */
3471 case ASHIFT:
3472 case ASHIFTRT:
3473 case LSHIFTRT:
3474 case ROTATE:
3475 case ROTATERT:
3476 case AND:
3477 case XOR:
3478 case NEG:
3479 case NOT:
3480 *total = COSTS_N_INSNS (1);
3481 return false;
3483 case PLUS:
3484 case MINUS:
3485 *total = COSTS_N_INSNS (1);
3486 return false;
3488 case MULT:
3489 switch (mode)
3491 case SImode:
3493 rtx left = XEXP (x, 0);
3494 rtx right = XEXP (x, 1);
3495 if (GET_CODE (right) == CONST_INT
3496 && CONST_OK_FOR_K (INTVAL (right)))
3497 *total = s390_cost->mhi;
3498 else if (GET_CODE (left) == SIGN_EXTEND)
3499 *total = s390_cost->mh;
3500 else
3501 *total = s390_cost->ms; /* msr, ms, msy */
3502 break;
3504 case DImode:
3506 rtx left = XEXP (x, 0);
3507 rtx right = XEXP (x, 1);
3508 if (TARGET_ZARCH)
3510 if (GET_CODE (right) == CONST_INT
3511 && CONST_OK_FOR_K (INTVAL (right)))
3512 *total = s390_cost->mghi;
3513 else if (GET_CODE (left) == SIGN_EXTEND)
3514 *total = s390_cost->msgf;
3515 else
3516 *total = s390_cost->msg; /* msgr, msg */
3518 else /* TARGET_31BIT */
3520 if (GET_CODE (left) == SIGN_EXTEND
3521 && GET_CODE (right) == SIGN_EXTEND)
3522 /* mulsidi case: mr, m */
3523 *total = s390_cost->m;
3524 else if (GET_CODE (left) == ZERO_EXTEND
3525 && GET_CODE (right) == ZERO_EXTEND
3526 && TARGET_CPU_ZARCH)
3527 /* umulsidi case: ml, mlr */
3528 *total = s390_cost->ml;
3529 else
3530 /* Complex calculation is required. */
3531 *total = COSTS_N_INSNS (40);
3533 break;
3535 case SFmode:
3536 case DFmode:
3537 *total = s390_cost->mult_df;
3538 break;
3539 case TFmode:
3540 *total = s390_cost->mxbr;
3541 break;
3542 default:
3543 return false;
3545 return false;
3547 case FMA:
3548 switch (mode)
3550 case DFmode:
3551 *total = s390_cost->madbr;
3552 break;
3553 case SFmode:
3554 *total = s390_cost->maebr;
3555 break;
3556 default:
3557 return false;
3559 /* Negate in the third argument is free: FMSUB. */
3560 if (GET_CODE (XEXP (x, 2)) == NEG)
3562 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3563 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3564 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3565 return true;
3567 return false;
3569 case UDIV:
3570 case UMOD:
3571 if (mode == TImode) /* 128 bit division */
3572 *total = s390_cost->dlgr;
3573 else if (mode == DImode)
3575 rtx right = XEXP (x, 1);
3576 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3577 *total = s390_cost->dlr;
3578 else /* 64 by 64 bit division */
3579 *total = s390_cost->dlgr;
3581 else if (mode == SImode) /* 32 bit division */
3582 *total = s390_cost->dlr;
3583 return false;
3585 case DIV:
3586 case MOD:
3587 if (mode == DImode)
3589 rtx right = XEXP (x, 1);
3590 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3591 if (TARGET_ZARCH)
3592 *total = s390_cost->dsgfr;
3593 else
3594 *total = s390_cost->dr;
3595 else /* 64 by 64 bit division */
3596 *total = s390_cost->dsgr;
3598 else if (mode == SImode) /* 32 bit division */
3599 *total = s390_cost->dlr;
3600 else if (mode == SFmode)
3602 *total = s390_cost->debr;
3604 else if (mode == DFmode)
3606 *total = s390_cost->ddbr;
3608 else if (mode == TFmode)
3610 *total = s390_cost->dxbr;
3612 return false;
3614 case SQRT:
3615 if (mode == SFmode)
3616 *total = s390_cost->sqebr;
3617 else if (mode == DFmode)
3618 *total = s390_cost->sqdbr;
3619 else /* TFmode */
3620 *total = s390_cost->sqxbr;
3621 return false;
3623 case SIGN_EXTEND:
3624 case ZERO_EXTEND:
3625 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3626 || outer_code == PLUS || outer_code == MINUS
3627 || outer_code == COMPARE)
3628 *total = 0;
3629 return false;
3631 case COMPARE:
3632 *total = COSTS_N_INSNS (1);
3633 if (GET_CODE (XEXP (x, 0)) == AND
3634 && GET_CODE (XEXP (x, 1)) == CONST_INT
3635 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3637 rtx op0 = XEXP (XEXP (x, 0), 0);
3638 rtx op1 = XEXP (XEXP (x, 0), 1);
3639 rtx op2 = XEXP (x, 1);
3641 if (memory_operand (op0, GET_MODE (op0))
3642 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3643 return true;
3644 if (register_operand (op0, GET_MODE (op0))
3645 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3646 return true;
3648 return false;
3650 default:
3651 return false;
3655 /* Return the cost of an address rtx ADDR. */
3657 static int
3658 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3659 addr_space_t as ATTRIBUTE_UNUSED,
3660 bool speed ATTRIBUTE_UNUSED)
3662 struct s390_address ad;
3663 if (!s390_decompose_address (addr, &ad))
3664 return 1000;
3666 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3669 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3670 otherwise return 0. */
3673 tls_symbolic_operand (rtx op)
3675 if (GET_CODE (op) != SYMBOL_REF)
3676 return 0;
3677 return SYMBOL_REF_TLS_MODEL (op);
3680 /* Split DImode access register reference REG (on 64-bit) into its constituent
3681 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3682 gen_highpart cannot be used as they assume all registers are word-sized,
3683 while our access registers have only half that size. */
3685 void
3686 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3688 gcc_assert (TARGET_64BIT);
3689 gcc_assert (ACCESS_REG_P (reg));
3690 gcc_assert (GET_MODE (reg) == DImode);
3691 gcc_assert (!(REGNO (reg) & 1));
3693 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3694 *hi = gen_rtx_REG (SImode, REGNO (reg));
3697 /* Return true if OP contains a symbol reference */
3699 bool
3700 symbolic_reference_mentioned_p (rtx op)
3702 const char *fmt;
3703 int i;
3705 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3706 return 1;
3708 fmt = GET_RTX_FORMAT (GET_CODE (op));
3709 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3711 if (fmt[i] == 'E')
3713 int j;
3715 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3716 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3717 return 1;
3720 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3721 return 1;
3724 return 0;
3727 /* Return true if OP contains a reference to a thread-local symbol. */
3729 bool
3730 tls_symbolic_reference_mentioned_p (rtx op)
3732 const char *fmt;
3733 int i;
3735 if (GET_CODE (op) == SYMBOL_REF)
3736 return tls_symbolic_operand (op);
3738 fmt = GET_RTX_FORMAT (GET_CODE (op));
3739 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3741 if (fmt[i] == 'E')
3743 int j;
3745 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3746 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3747 return true;
3750 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3751 return true;
3754 return false;
3758 /* Return true if OP is a legitimate general operand when
3759 generating PIC code. It is given that flag_pic is on
3760 and that OP satisfies CONSTANT_P. */
3763 legitimate_pic_operand_p (rtx op)
3765 /* Accept all non-symbolic constants. */
3766 if (!SYMBOLIC_CONST (op))
3767 return 1;
3769 /* Reject everything else; must be handled
3770 via emit_symbolic_move. */
3771 return 0;
3774 /* Returns true if the constant value OP is a legitimate general operand.
3775 It is given that OP satisfies CONSTANT_P. */
3777 static bool
3778 s390_legitimate_constant_p (machine_mode mode, rtx op)
3780 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3782 if (GET_MODE_SIZE (mode) != 16)
3783 return 0;
3785 if (!satisfies_constraint_j00 (op)
3786 && !satisfies_constraint_jm1 (op)
3787 && !satisfies_constraint_jKK (op)
3788 && !satisfies_constraint_jxx (op)
3789 && !satisfies_constraint_jyy (op))
3790 return 0;
3793 /* Accept all non-symbolic constants. */
3794 if (!SYMBOLIC_CONST (op))
3795 return 1;
3797 /* Accept immediate LARL operands. */
3798 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3799 return 1;
3801 /* Thread-local symbols are never legal constants. This is
3802 so that emit_call knows that computing such addresses
3803 might require a function call. */
3804 if (TLS_SYMBOLIC_CONST (op))
3805 return 0;
3807 /* In the PIC case, symbolic constants must *not* be
3808 forced into the literal pool. We accept them here,
3809 so that they will be handled by emit_symbolic_move. */
3810 if (flag_pic)
3811 return 1;
3813 /* All remaining non-PIC symbolic constants are
3814 forced into the literal pool. */
3815 return 0;
3818 /* Determine if it's legal to put X into the constant pool. This
3819 is not possible if X contains the address of a symbol that is
3820 not constant (TLS) or not known at final link time (PIC). */
3822 static bool
3823 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3825 switch (GET_CODE (x))
3827 case CONST_INT:
3828 case CONST_DOUBLE:
3829 case CONST_WIDE_INT:
3830 case CONST_VECTOR:
3831 /* Accept all non-symbolic constants. */
3832 return false;
3834 case LABEL_REF:
3835 /* Labels are OK iff we are non-PIC. */
3836 return flag_pic != 0;
3838 case SYMBOL_REF:
3839 /* 'Naked' TLS symbol references are never OK,
3840 non-TLS symbols are OK iff we are non-PIC. */
3841 if (tls_symbolic_operand (x))
3842 return true;
3843 else
3844 return flag_pic != 0;
3846 case CONST:
3847 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3848 case PLUS:
3849 case MINUS:
3850 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3851 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3853 case UNSPEC:
3854 switch (XINT (x, 1))
3856 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3857 case UNSPEC_LTREL_OFFSET:
3858 case UNSPEC_GOT:
3859 case UNSPEC_GOTOFF:
3860 case UNSPEC_PLTOFF:
3861 case UNSPEC_TLSGD:
3862 case UNSPEC_TLSLDM:
3863 case UNSPEC_NTPOFF:
3864 case UNSPEC_DTPOFF:
3865 case UNSPEC_GOTNTPOFF:
3866 case UNSPEC_INDNTPOFF:
3867 return false;
3869 /* If the literal pool shares the code section, be put
3870 execute template placeholders into the pool as well. */
3871 case UNSPEC_INSN:
3872 return TARGET_CPU_ZARCH;
3874 default:
3875 return true;
3877 break;
3879 default:
3880 gcc_unreachable ();
3884 /* Returns true if the constant value OP is a legitimate general
3885 operand during and after reload. The difference to
3886 legitimate_constant_p is that this function will not accept
3887 a constant that would need to be forced to the literal pool
3888 before it can be used as operand.
3889 This function accepts all constants which can be loaded directly
3890 into a GPR. */
3892 bool
3893 legitimate_reload_constant_p (rtx op)
3895 /* Accept la(y) operands. */
3896 if (GET_CODE (op) == CONST_INT
3897 && DISP_IN_RANGE (INTVAL (op)))
3898 return true;
3900 /* Accept l(g)hi/l(g)fi operands. */
3901 if (GET_CODE (op) == CONST_INT
3902 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3903 return true;
3905 /* Accept lliXX operands. */
3906 if (TARGET_ZARCH
3907 && GET_CODE (op) == CONST_INT
3908 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3909 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3910 return true;
3912 if (TARGET_EXTIMM
3913 && GET_CODE (op) == CONST_INT
3914 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3915 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3916 return true;
3918 /* Accept larl operands. */
3919 if (TARGET_CPU_ZARCH
3920 && larl_operand (op, VOIDmode))
3921 return true;
3923 /* Accept floating-point zero operands that fit into a single GPR. */
3924 if (GET_CODE (op) == CONST_DOUBLE
3925 && s390_float_const_zero_p (op)
3926 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3927 return true;
3929 /* Accept double-word operands that can be split. */
3930 if (GET_CODE (op) == CONST_WIDE_INT
3931 || (GET_CODE (op) == CONST_INT
3932 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3934 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3935 rtx hi = operand_subword (op, 0, 0, dword_mode);
3936 rtx lo = operand_subword (op, 1, 0, dword_mode);
3937 return legitimate_reload_constant_p (hi)
3938 && legitimate_reload_constant_p (lo);
3941 /* Everything else cannot be handled without reload. */
3942 return false;
3945 /* Returns true if the constant value OP is a legitimate fp operand
3946 during and after reload.
3947 This function accepts all constants which can be loaded directly
3948 into an FPR. */
3950 static bool
3951 legitimate_reload_fp_constant_p (rtx op)
3953 /* Accept floating-point zero operands if the load zero instruction
3954 can be used. Prior to z196 the load fp zero instruction caused a
3955 performance penalty if the result is used as BFP number. */
3956 if (TARGET_Z196
3957 && GET_CODE (op) == CONST_DOUBLE
3958 && s390_float_const_zero_p (op))
3959 return true;
3961 return false;
3964 /* Returns true if the constant value OP is a legitimate vector operand
3965 during and after reload.
3966 This function accepts all constants which can be loaded directly
3967 into an VR. */
3969 static bool
3970 legitimate_reload_vector_constant_p (rtx op)
3972 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3973 && (satisfies_constraint_j00 (op)
3974 || satisfies_constraint_jm1 (op)
3975 || satisfies_constraint_jKK (op)
3976 || satisfies_constraint_jxx (op)
3977 || satisfies_constraint_jyy (op)))
3978 return true;
3980 return false;
3983 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3984 return the class of reg to actually use. */
3986 static reg_class_t
3987 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3989 switch (GET_CODE (op))
3991 /* Constants we cannot reload into general registers
3992 must be forced into the literal pool. */
3993 case CONST_VECTOR:
3994 case CONST_DOUBLE:
3995 case CONST_INT:
3996 case CONST_WIDE_INT:
3997 if (reg_class_subset_p (GENERAL_REGS, rclass)
3998 && legitimate_reload_constant_p (op))
3999 return GENERAL_REGS;
4000 else if (reg_class_subset_p (ADDR_REGS, rclass)
4001 && legitimate_reload_constant_p (op))
4002 return ADDR_REGS;
4003 else if (reg_class_subset_p (FP_REGS, rclass)
4004 && legitimate_reload_fp_constant_p (op))
4005 return FP_REGS;
4006 else if (reg_class_subset_p (VEC_REGS, rclass)
4007 && legitimate_reload_vector_constant_p (op))
4008 return VEC_REGS;
4010 return NO_REGS;
4012 /* If a symbolic constant or a PLUS is reloaded,
4013 it is most likely being used as an address, so
4014 prefer ADDR_REGS. If 'class' is not a superset
4015 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4016 case CONST:
4017 /* Symrefs cannot be pushed into the literal pool with -fPIC
4018 so we *MUST NOT* return NO_REGS for these cases
4019 (s390_cannot_force_const_mem will return true).
4021 On the other hand we MUST return NO_REGS for symrefs with
4022 invalid addend which might have been pushed to the literal
4023 pool (no -fPIC). Usually we would expect them to be
4024 handled via secondary reload but this does not happen if
4025 they are used as literal pool slot replacement in reload
4026 inheritance (see emit_input_reload_insns). */
4027 if (TARGET_CPU_ZARCH
4028 && GET_CODE (XEXP (op, 0)) == PLUS
4029 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4030 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4032 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4033 return ADDR_REGS;
4034 else
4035 return NO_REGS;
4037 /* fallthrough */
4038 case LABEL_REF:
4039 case SYMBOL_REF:
4040 if (!legitimate_reload_constant_p (op))
4041 return NO_REGS;
4042 /* fallthrough */
4043 case PLUS:
4044 /* load address will be used. */
4045 if (reg_class_subset_p (ADDR_REGS, rclass))
4046 return ADDR_REGS;
4047 else
4048 return NO_REGS;
4050 default:
4051 break;
4054 return rclass;
4057 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4058 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4059 aligned. */
4061 bool
4062 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4064 HOST_WIDE_INT addend;
4065 rtx symref;
4067 /* The "required alignment" might be 0 (e.g. for certain structs
4068 accessed via BLKmode). Early abort in this case, as well as when
4069 an alignment > 8 is required. */
4070 if (alignment < 2 || alignment > 8)
4071 return false;
4073 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4074 return false;
4076 if (addend & (alignment - 1))
4077 return false;
4079 if (GET_CODE (symref) == SYMBOL_REF)
4081 /* We have load-relative instructions for 2-byte, 4-byte, and
4082 8-byte alignment so allow only these. */
4083 switch (alignment)
4085 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4086 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4087 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4088 default: return false;
4092 if (GET_CODE (symref) == UNSPEC
4093 && alignment <= UNITS_PER_LONG)
4094 return true;
4096 return false;
4099 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4100 operand SCRATCH is used to reload the even part of the address and
4101 adding one. */
4103 void
4104 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4106 HOST_WIDE_INT addend;
4107 rtx symref;
4109 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4110 gcc_unreachable ();
4112 if (!(addend & 1))
4113 /* Easy case. The addend is even so larl will do fine. */
4114 emit_move_insn (reg, addr);
4115 else
4117 /* We can leave the scratch register untouched if the target
4118 register is a valid base register. */
4119 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4120 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4121 scratch = reg;
4123 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4124 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4126 if (addend != 1)
4127 emit_move_insn (scratch,
4128 gen_rtx_CONST (Pmode,
4129 gen_rtx_PLUS (Pmode, symref,
4130 GEN_INT (addend - 1))));
4131 else
4132 emit_move_insn (scratch, symref);
4134 /* Increment the address using la in order to avoid clobbering cc. */
4135 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4139 /* Generate what is necessary to move between REG and MEM using
4140 SCRATCH. The direction is given by TOMEM. */
4142 void
4143 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4145 /* Reload might have pulled a constant out of the literal pool.
4146 Force it back in. */
4147 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4148 || GET_CODE (mem) == CONST_WIDE_INT
4149 || GET_CODE (mem) == CONST_VECTOR
4150 || GET_CODE (mem) == CONST)
4151 mem = force_const_mem (GET_MODE (reg), mem);
4153 gcc_assert (MEM_P (mem));
4155 /* For a load from memory we can leave the scratch register
4156 untouched if the target register is a valid base register. */
4157 if (!tomem
4158 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4159 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4160 && GET_MODE (reg) == GET_MODE (scratch))
4161 scratch = reg;
4163 /* Load address into scratch register. Since we can't have a
4164 secondary reload for a secondary reload we have to cover the case
4165 where larl would need a secondary reload here as well. */
4166 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4168 /* Now we can use a standard load/store to do the move. */
4169 if (tomem)
4170 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4171 else
4172 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4175 /* Inform reload about cases where moving X with a mode MODE to a register in
4176 RCLASS requires an extra scratch or immediate register. Return the class
4177 needed for the immediate register. */
4179 static reg_class_t
4180 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4181 machine_mode mode, secondary_reload_info *sri)
4183 enum reg_class rclass = (enum reg_class) rclass_i;
4185 /* Intermediate register needed. */
4186 if (reg_classes_intersect_p (CC_REGS, rclass))
4187 return GENERAL_REGS;
4189 if (TARGET_VX)
4191 /* The vst/vl vector move instructions allow only for short
4192 displacements. */
4193 if (MEM_P (x)
4194 && GET_CODE (XEXP (x, 0)) == PLUS
4195 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4196 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4197 && reg_class_subset_p (rclass, VEC_REGS)
4198 && (!reg_class_subset_p (rclass, FP_REGS)
4199 || (GET_MODE_SIZE (mode) > 8
4200 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4202 if (in_p)
4203 sri->icode = (TARGET_64BIT ?
4204 CODE_FOR_reloaddi_la_in :
4205 CODE_FOR_reloadsi_la_in);
4206 else
4207 sri->icode = (TARGET_64BIT ?
4208 CODE_FOR_reloaddi_la_out :
4209 CODE_FOR_reloadsi_la_out);
4213 if (TARGET_Z10)
4215 HOST_WIDE_INT offset;
4216 rtx symref;
4218 /* On z10 several optimizer steps may generate larl operands with
4219 an odd addend. */
4220 if (in_p
4221 && s390_loadrelative_operand_p (x, &symref, &offset)
4222 && mode == Pmode
4223 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4224 && (offset & 1) == 1)
4225 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4226 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4228 /* Handle all the (mem (symref)) accesses we cannot use the z10
4229 instructions for. */
4230 if (MEM_P (x)
4231 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4232 && (mode == QImode
4233 || !reg_class_subset_p (rclass, GENERAL_REGS)
4234 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4235 || !s390_check_symref_alignment (XEXP (x, 0),
4236 GET_MODE_SIZE (mode))))
4238 #define __SECONDARY_RELOAD_CASE(M,m) \
4239 case M##mode: \
4240 if (TARGET_64BIT) \
4241 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4242 CODE_FOR_reload##m##di_tomem_z10; \
4243 else \
4244 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4245 CODE_FOR_reload##m##si_tomem_z10; \
4246 break;
4248 switch (GET_MODE (x))
4250 __SECONDARY_RELOAD_CASE (QI, qi);
4251 __SECONDARY_RELOAD_CASE (HI, hi);
4252 __SECONDARY_RELOAD_CASE (SI, si);
4253 __SECONDARY_RELOAD_CASE (DI, di);
4254 __SECONDARY_RELOAD_CASE (TI, ti);
4255 __SECONDARY_RELOAD_CASE (SF, sf);
4256 __SECONDARY_RELOAD_CASE (DF, df);
4257 __SECONDARY_RELOAD_CASE (TF, tf);
4258 __SECONDARY_RELOAD_CASE (SD, sd);
4259 __SECONDARY_RELOAD_CASE (DD, dd);
4260 __SECONDARY_RELOAD_CASE (TD, td);
4261 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4262 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4263 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4264 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4265 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4266 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4267 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4268 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4269 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4270 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4271 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4272 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4273 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4274 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4275 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4276 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4277 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4278 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4279 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4280 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4281 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4282 default:
4283 gcc_unreachable ();
4285 #undef __SECONDARY_RELOAD_CASE
4289 /* We need a scratch register when loading a PLUS expression which
4290 is not a legitimate operand of the LOAD ADDRESS instruction. */
4291 /* LRA can deal with transformation of plus op very well -- so we
4292 don't need to prompt LRA in this case. */
4293 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4294 sri->icode = (TARGET_64BIT ?
4295 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4297 /* Performing a multiword move from or to memory we have to make sure the
4298 second chunk in memory is addressable without causing a displacement
4299 overflow. If that would be the case we calculate the address in
4300 a scratch register. */
4301 if (MEM_P (x)
4302 && GET_CODE (XEXP (x, 0)) == PLUS
4303 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4304 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4305 + GET_MODE_SIZE (mode) - 1))
4307 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4308 in a s_operand address since we may fallback to lm/stm. So we only
4309 have to care about overflows in the b+i+d case. */
4310 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4311 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4312 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4313 /* For FP_REGS no lm/stm is available so this check is triggered
4314 for displacement overflows in b+i+d and b+d like addresses. */
4315 || (reg_classes_intersect_p (FP_REGS, rclass)
4316 && s390_class_max_nregs (FP_REGS, mode) > 1))
4318 if (in_p)
4319 sri->icode = (TARGET_64BIT ?
4320 CODE_FOR_reloaddi_la_in :
4321 CODE_FOR_reloadsi_la_in);
4322 else
4323 sri->icode = (TARGET_64BIT ?
4324 CODE_FOR_reloaddi_la_out :
4325 CODE_FOR_reloadsi_la_out);
4329 /* A scratch address register is needed when a symbolic constant is
4330 copied to r0 compiling with -fPIC. In other cases the target
4331 register might be used as temporary (see legitimize_pic_address). */
4332 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4333 sri->icode = (TARGET_64BIT ?
4334 CODE_FOR_reloaddi_PIC_addr :
4335 CODE_FOR_reloadsi_PIC_addr);
4337 /* Either scratch or no register needed. */
4338 return NO_REGS;
4341 /* Generate code to load SRC, which is PLUS that is not a
4342 legitimate operand for the LA instruction, into TARGET.
4343 SCRATCH may be used as scratch register. */
4345 void
4346 s390_expand_plus_operand (rtx target, rtx src,
4347 rtx scratch)
4349 rtx sum1, sum2;
4350 struct s390_address ad;
4352 /* src must be a PLUS; get its two operands. */
4353 gcc_assert (GET_CODE (src) == PLUS);
4354 gcc_assert (GET_MODE (src) == Pmode);
4356 /* Check if any of the two operands is already scheduled
4357 for replacement by reload. This can happen e.g. when
4358 float registers occur in an address. */
4359 sum1 = find_replacement (&XEXP (src, 0));
4360 sum2 = find_replacement (&XEXP (src, 1));
4361 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4363 /* If the address is already strictly valid, there's nothing to do. */
4364 if (!s390_decompose_address (src, &ad)
4365 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4366 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4368 /* Otherwise, one of the operands cannot be an address register;
4369 we reload its value into the scratch register. */
4370 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4372 emit_move_insn (scratch, sum1);
4373 sum1 = scratch;
4375 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4377 emit_move_insn (scratch, sum2);
4378 sum2 = scratch;
4381 /* According to the way these invalid addresses are generated
4382 in reload.c, it should never happen (at least on s390) that
4383 *neither* of the PLUS components, after find_replacements
4384 was applied, is an address register. */
4385 if (sum1 == scratch && sum2 == scratch)
4387 debug_rtx (src);
4388 gcc_unreachable ();
4391 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4394 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4395 is only ever performed on addresses, so we can mark the
4396 sum as legitimate for LA in any case. */
4397 s390_load_address (target, src);
4401 /* Return true if ADDR is a valid memory address.
4402 STRICT specifies whether strict register checking applies. */
4404 static bool
4405 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4407 struct s390_address ad;
4409 if (TARGET_Z10
4410 && larl_operand (addr, VOIDmode)
4411 && (mode == VOIDmode
4412 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4413 return true;
4415 if (!s390_decompose_address (addr, &ad))
4416 return false;
4418 if (strict)
4420 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4421 return false;
4423 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4424 return false;
4426 else
4428 if (ad.base
4429 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4430 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4431 return false;
4433 if (ad.indx
4434 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4435 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4436 return false;
4438 return true;
4441 /* Return true if OP is a valid operand for the LA instruction.
4442 In 31-bit, we need to prove that the result is used as an
4443 address, as LA performs only a 31-bit addition. */
4445 bool
4446 legitimate_la_operand_p (rtx op)
4448 struct s390_address addr;
4449 if (!s390_decompose_address (op, &addr))
4450 return false;
4452 return (TARGET_64BIT || addr.pointer);
4455 /* Return true if it is valid *and* preferable to use LA to
4456 compute the sum of OP1 and OP2. */
4458 bool
4459 preferred_la_operand_p (rtx op1, rtx op2)
4461 struct s390_address addr;
4463 if (op2 != const0_rtx)
4464 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4466 if (!s390_decompose_address (op1, &addr))
4467 return false;
4468 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4469 return false;
4470 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4471 return false;
4473 /* Avoid LA instructions with index register on z196; it is
4474 preferable to use regular add instructions when possible.
4475 Starting with zEC12 the la with index register is "uncracked"
4476 again. */
4477 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4478 return false;
4480 if (!TARGET_64BIT && !addr.pointer)
4481 return false;
4483 if (addr.pointer)
4484 return true;
4486 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4487 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4488 return true;
4490 return false;
4493 /* Emit a forced load-address operation to load SRC into DST.
4494 This will use the LOAD ADDRESS instruction even in situations
4495 where legitimate_la_operand_p (SRC) returns false. */
4497 void
4498 s390_load_address (rtx dst, rtx src)
4500 if (TARGET_64BIT)
4501 emit_move_insn (dst, src);
4502 else
4503 emit_insn (gen_force_la_31 (dst, src));
4506 /* Return a legitimate reference for ORIG (an address) using the
4507 register REG. If REG is 0, a new pseudo is generated.
4509 There are two types of references that must be handled:
4511 1. Global data references must load the address from the GOT, via
4512 the PIC reg. An insn is emitted to do this load, and the reg is
4513 returned.
4515 2. Static data references, constant pool addresses, and code labels
4516 compute the address as an offset from the GOT, whose base is in
4517 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4518 differentiate them from global data objects. The returned
4519 address is the PIC reg + an unspec constant.
4521 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4522 reg also appears in the address. */
4525 legitimize_pic_address (rtx orig, rtx reg)
4527 rtx addr = orig;
4528 rtx addend = const0_rtx;
4529 rtx new_rtx = orig;
4531 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4533 if (GET_CODE (addr) == CONST)
4534 addr = XEXP (addr, 0);
4536 if (GET_CODE (addr) == PLUS)
4538 addend = XEXP (addr, 1);
4539 addr = XEXP (addr, 0);
4542 if ((GET_CODE (addr) == LABEL_REF
4543 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4544 || (GET_CODE (addr) == UNSPEC &&
4545 (XINT (addr, 1) == UNSPEC_GOTENT
4546 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4547 && GET_CODE (addend) == CONST_INT)
4549 /* This can be locally addressed. */
4551 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4552 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4553 gen_rtx_CONST (Pmode, addr) : addr);
4555 if (TARGET_CPU_ZARCH
4556 && larl_operand (const_addr, VOIDmode)
4557 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4558 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4560 if (INTVAL (addend) & 1)
4562 /* LARL can't handle odd offsets, so emit a pair of LARL
4563 and LA. */
4564 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4566 if (!DISP_IN_RANGE (INTVAL (addend)))
4568 HOST_WIDE_INT even = INTVAL (addend) - 1;
4569 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4570 addr = gen_rtx_CONST (Pmode, addr);
4571 addend = const1_rtx;
4574 emit_move_insn (temp, addr);
4575 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4577 if (reg != 0)
4579 s390_load_address (reg, new_rtx);
4580 new_rtx = reg;
4583 else
4585 /* If the offset is even, we can just use LARL. This
4586 will happen automatically. */
4589 else
4591 /* No larl - Access local symbols relative to the GOT. */
4593 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4595 if (reload_in_progress || reload_completed)
4596 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4598 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4599 if (addend != const0_rtx)
4600 addr = gen_rtx_PLUS (Pmode, addr, addend);
4601 addr = gen_rtx_CONST (Pmode, addr);
4602 addr = force_const_mem (Pmode, addr);
4603 emit_move_insn (temp, addr);
4605 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4606 if (reg != 0)
4608 s390_load_address (reg, new_rtx);
4609 new_rtx = reg;
4613 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4615 /* A non-local symbol reference without addend.
4617 The symbol ref is wrapped into an UNSPEC to make sure the
4618 proper operand modifier (@GOT or @GOTENT) will be emitted.
4619 This will tell the linker to put the symbol into the GOT.
4621 Additionally the code dereferencing the GOT slot is emitted here.
4623 An addend to the symref needs to be added afterwards.
4624 legitimize_pic_address calls itself recursively to handle
4625 that case. So no need to do it here. */
4627 if (reg == 0)
4628 reg = gen_reg_rtx (Pmode);
4630 if (TARGET_Z10)
4632 /* Use load relative if possible.
4633 lgrl <target>, sym@GOTENT */
4634 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4635 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4636 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4638 emit_move_insn (reg, new_rtx);
4639 new_rtx = reg;
4641 else if (flag_pic == 1)
4643 /* Assume GOT offset is a valid displacement operand (< 4k
4644 or < 512k with z990). This is handled the same way in
4645 both 31- and 64-bit code (@GOT).
4646 lg <target>, sym@GOT(r12) */
4648 if (reload_in_progress || reload_completed)
4649 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4651 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4652 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4653 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4654 new_rtx = gen_const_mem (Pmode, new_rtx);
4655 emit_move_insn (reg, new_rtx);
4656 new_rtx = reg;
4658 else if (TARGET_CPU_ZARCH)
4660 /* If the GOT offset might be >= 4k, we determine the position
4661 of the GOT entry via a PC-relative LARL (@GOTENT).
4662 larl temp, sym@GOTENT
4663 lg <target>, 0(temp) */
4665 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4667 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4668 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4670 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4671 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4672 emit_move_insn (temp, new_rtx);
4674 new_rtx = gen_const_mem (Pmode, temp);
4675 emit_move_insn (reg, new_rtx);
4677 new_rtx = reg;
4679 else
4681 /* If the GOT offset might be >= 4k, we have to load it
4682 from the literal pool (@GOT).
4684 lg temp, lit-litbase(r13)
4685 lg <target>, 0(temp)
4686 lit: .long sym@GOT */
4688 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4690 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4691 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4693 if (reload_in_progress || reload_completed)
4694 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4696 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4697 addr = gen_rtx_CONST (Pmode, addr);
4698 addr = force_const_mem (Pmode, addr);
4699 emit_move_insn (temp, addr);
4701 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4702 new_rtx = gen_const_mem (Pmode, new_rtx);
4703 emit_move_insn (reg, new_rtx);
4704 new_rtx = reg;
4707 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4709 gcc_assert (XVECLEN (addr, 0) == 1);
4710 switch (XINT (addr, 1))
4712 /* These address symbols (or PLT slots) relative to the GOT
4713 (not GOT slots!). In general this will exceed the
4714 displacement range so these value belong into the literal
4715 pool. */
4716 case UNSPEC_GOTOFF:
4717 case UNSPEC_PLTOFF:
4718 new_rtx = force_const_mem (Pmode, orig);
4719 break;
4721 /* For -fPIC the GOT size might exceed the displacement
4722 range so make sure the value is in the literal pool. */
4723 case UNSPEC_GOT:
4724 if (flag_pic == 2)
4725 new_rtx = force_const_mem (Pmode, orig);
4726 break;
4728 /* For @GOTENT larl is used. This is handled like local
4729 symbol refs. */
4730 case UNSPEC_GOTENT:
4731 gcc_unreachable ();
4732 break;
4734 /* @PLT is OK as is on 64-bit, must be converted to
4735 GOT-relative @PLTOFF on 31-bit. */
4736 case UNSPEC_PLT:
4737 if (!TARGET_CPU_ZARCH)
4739 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4741 if (reload_in_progress || reload_completed)
4742 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4744 addr = XVECEXP (addr, 0, 0);
4745 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4746 UNSPEC_PLTOFF);
4747 if (addend != const0_rtx)
4748 addr = gen_rtx_PLUS (Pmode, addr, addend);
4749 addr = gen_rtx_CONST (Pmode, addr);
4750 addr = force_const_mem (Pmode, addr);
4751 emit_move_insn (temp, addr);
4753 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4754 if (reg != 0)
4756 s390_load_address (reg, new_rtx);
4757 new_rtx = reg;
4760 else
4761 /* On 64 bit larl can be used. This case is handled like
4762 local symbol refs. */
4763 gcc_unreachable ();
4764 break;
4766 /* Everything else cannot happen. */
4767 default:
4768 gcc_unreachable ();
4771 else if (addend != const0_rtx)
4773 /* Otherwise, compute the sum. */
4775 rtx base = legitimize_pic_address (addr, reg);
4776 new_rtx = legitimize_pic_address (addend,
4777 base == reg ? NULL_RTX : reg);
4778 if (GET_CODE (new_rtx) == CONST_INT)
4779 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4780 else
4782 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4784 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4785 new_rtx = XEXP (new_rtx, 1);
4787 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4790 if (GET_CODE (new_rtx) == CONST)
4791 new_rtx = XEXP (new_rtx, 0);
4792 new_rtx = force_operand (new_rtx, 0);
4795 return new_rtx;
4798 /* Load the thread pointer into a register. */
4801 s390_get_thread_pointer (void)
4803 rtx tp = gen_reg_rtx (Pmode);
4805 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4806 mark_reg_pointer (tp, BITS_PER_WORD);
4808 return tp;
4811 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4812 in s390_tls_symbol which always refers to __tls_get_offset.
4813 The returned offset is written to RESULT_REG and an USE rtx is
4814 generated for TLS_CALL. */
4816 static GTY(()) rtx s390_tls_symbol;
4818 static void
4819 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4821 rtx insn;
4823 if (!flag_pic)
4824 emit_insn (s390_load_got ());
4826 if (!s390_tls_symbol)
4827 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4829 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4830 gen_rtx_REG (Pmode, RETURN_REGNUM));
4832 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4833 RTL_CONST_CALL_P (insn) = 1;
4836 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4837 this (thread-local) address. REG may be used as temporary. */
4839 static rtx
4840 legitimize_tls_address (rtx addr, rtx reg)
4842 rtx new_rtx, tls_call, temp, base, r2, insn;
4844 if (GET_CODE (addr) == SYMBOL_REF)
4845 switch (tls_symbolic_operand (addr))
4847 case TLS_MODEL_GLOBAL_DYNAMIC:
4848 start_sequence ();
4849 r2 = gen_rtx_REG (Pmode, 2);
4850 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4851 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4852 new_rtx = force_const_mem (Pmode, new_rtx);
4853 emit_move_insn (r2, new_rtx);
4854 s390_emit_tls_call_insn (r2, tls_call);
4855 insn = get_insns ();
4856 end_sequence ();
4858 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4859 temp = gen_reg_rtx (Pmode);
4860 emit_libcall_block (insn, temp, r2, new_rtx);
4862 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4863 if (reg != 0)
4865 s390_load_address (reg, new_rtx);
4866 new_rtx = reg;
4868 break;
4870 case TLS_MODEL_LOCAL_DYNAMIC:
4871 start_sequence ();
4872 r2 = gen_rtx_REG (Pmode, 2);
4873 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4874 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4875 new_rtx = force_const_mem (Pmode, new_rtx);
4876 emit_move_insn (r2, new_rtx);
4877 s390_emit_tls_call_insn (r2, tls_call);
4878 insn = get_insns ();
4879 end_sequence ();
4881 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4882 temp = gen_reg_rtx (Pmode);
4883 emit_libcall_block (insn, temp, r2, new_rtx);
4885 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4886 base = gen_reg_rtx (Pmode);
4887 s390_load_address (base, new_rtx);
4889 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4890 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4891 new_rtx = force_const_mem (Pmode, new_rtx);
4892 temp = gen_reg_rtx (Pmode);
4893 emit_move_insn (temp, new_rtx);
4895 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4896 if (reg != 0)
4898 s390_load_address (reg, new_rtx);
4899 new_rtx = reg;
4901 break;
4903 case TLS_MODEL_INITIAL_EXEC:
4904 if (flag_pic == 1)
4906 /* Assume GOT offset < 4k. This is handled the same way
4907 in both 31- and 64-bit code. */
4909 if (reload_in_progress || reload_completed)
4910 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4912 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4913 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4914 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4915 new_rtx = gen_const_mem (Pmode, new_rtx);
4916 temp = gen_reg_rtx (Pmode);
4917 emit_move_insn (temp, new_rtx);
4919 else if (TARGET_CPU_ZARCH)
4921 /* If the GOT offset might be >= 4k, we determine the position
4922 of the GOT entry via a PC-relative LARL. */
4924 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4925 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4926 temp = gen_reg_rtx (Pmode);
4927 emit_move_insn (temp, new_rtx);
4929 new_rtx = gen_const_mem (Pmode, temp);
4930 temp = gen_reg_rtx (Pmode);
4931 emit_move_insn (temp, new_rtx);
4933 else if (flag_pic)
4935 /* If the GOT offset might be >= 4k, we have to load it
4936 from the literal pool. */
4938 if (reload_in_progress || reload_completed)
4939 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4941 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4942 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4943 new_rtx = force_const_mem (Pmode, new_rtx);
4944 temp = gen_reg_rtx (Pmode);
4945 emit_move_insn (temp, new_rtx);
4947 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4948 new_rtx = gen_const_mem (Pmode, new_rtx);
4950 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4951 temp = gen_reg_rtx (Pmode);
4952 emit_insn (gen_rtx_SET (temp, new_rtx));
4954 else
4956 /* In position-dependent code, load the absolute address of
4957 the GOT entry from the literal pool. */
4959 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4960 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4961 new_rtx = force_const_mem (Pmode, new_rtx);
4962 temp = gen_reg_rtx (Pmode);
4963 emit_move_insn (temp, new_rtx);
4965 new_rtx = temp;
4966 new_rtx = gen_const_mem (Pmode, new_rtx);
4967 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4968 temp = gen_reg_rtx (Pmode);
4969 emit_insn (gen_rtx_SET (temp, new_rtx));
4972 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4973 if (reg != 0)
4975 s390_load_address (reg, new_rtx);
4976 new_rtx = reg;
4978 break;
4980 case TLS_MODEL_LOCAL_EXEC:
4981 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4982 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4983 new_rtx = force_const_mem (Pmode, new_rtx);
4984 temp = gen_reg_rtx (Pmode);
4985 emit_move_insn (temp, new_rtx);
4987 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4988 if (reg != 0)
4990 s390_load_address (reg, new_rtx);
4991 new_rtx = reg;
4993 break;
4995 default:
4996 gcc_unreachable ();
4999 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5001 switch (XINT (XEXP (addr, 0), 1))
5003 case UNSPEC_INDNTPOFF:
5004 gcc_assert (TARGET_CPU_ZARCH);
5005 new_rtx = addr;
5006 break;
5008 default:
5009 gcc_unreachable ();
5013 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5014 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5016 new_rtx = XEXP (XEXP (addr, 0), 0);
5017 if (GET_CODE (new_rtx) != SYMBOL_REF)
5018 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5020 new_rtx = legitimize_tls_address (new_rtx, reg);
5021 new_rtx = plus_constant (Pmode, new_rtx,
5022 INTVAL (XEXP (XEXP (addr, 0), 1)));
5023 new_rtx = force_operand (new_rtx, 0);
5026 else
5027 gcc_unreachable (); /* for now ... */
5029 return new_rtx;
5032 /* Emit insns making the address in operands[1] valid for a standard
5033 move to operands[0]. operands[1] is replaced by an address which
5034 should be used instead of the former RTX to emit the move
5035 pattern. */
5037 void
5038 emit_symbolic_move (rtx *operands)
5040 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5042 if (GET_CODE (operands[0]) == MEM)
5043 operands[1] = force_reg (Pmode, operands[1]);
5044 else if (TLS_SYMBOLIC_CONST (operands[1]))
5045 operands[1] = legitimize_tls_address (operands[1], temp);
5046 else if (flag_pic)
5047 operands[1] = legitimize_pic_address (operands[1], temp);
5050 /* Try machine-dependent ways of modifying an illegitimate address X
5051 to be legitimate. If we find one, return the new, valid address.
5053 OLDX is the address as it was before break_out_memory_refs was called.
5054 In some cases it is useful to look at this to decide what needs to be done.
5056 MODE is the mode of the operand pointed to by X.
5058 When -fpic is used, special handling is needed for symbolic references.
5059 See comments by legitimize_pic_address for details. */
5061 static rtx
5062 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5063 machine_mode mode ATTRIBUTE_UNUSED)
5065 rtx constant_term = const0_rtx;
5067 if (TLS_SYMBOLIC_CONST (x))
5069 x = legitimize_tls_address (x, 0);
5071 if (s390_legitimate_address_p (mode, x, FALSE))
5072 return x;
5074 else if (GET_CODE (x) == PLUS
5075 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5076 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5078 return x;
5080 else if (flag_pic)
5082 if (SYMBOLIC_CONST (x)
5083 || (GET_CODE (x) == PLUS
5084 && (SYMBOLIC_CONST (XEXP (x, 0))
5085 || SYMBOLIC_CONST (XEXP (x, 1)))))
5086 x = legitimize_pic_address (x, 0);
5088 if (s390_legitimate_address_p (mode, x, FALSE))
5089 return x;
5092 x = eliminate_constant_term (x, &constant_term);
5094 /* Optimize loading of large displacements by splitting them
5095 into the multiple of 4K and the rest; this allows the
5096 former to be CSE'd if possible.
5098 Don't do this if the displacement is added to a register
5099 pointing into the stack frame, as the offsets will
5100 change later anyway. */
5102 if (GET_CODE (constant_term) == CONST_INT
5103 && !TARGET_LONG_DISPLACEMENT
5104 && !DISP_IN_RANGE (INTVAL (constant_term))
5105 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5107 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5108 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5110 rtx temp = gen_reg_rtx (Pmode);
5111 rtx val = force_operand (GEN_INT (upper), temp);
5112 if (val != temp)
5113 emit_move_insn (temp, val);
5115 x = gen_rtx_PLUS (Pmode, x, temp);
5116 constant_term = GEN_INT (lower);
5119 if (GET_CODE (x) == PLUS)
5121 if (GET_CODE (XEXP (x, 0)) == REG)
5123 rtx temp = gen_reg_rtx (Pmode);
5124 rtx val = force_operand (XEXP (x, 1), temp);
5125 if (val != temp)
5126 emit_move_insn (temp, val);
5128 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5131 else if (GET_CODE (XEXP (x, 1)) == REG)
5133 rtx temp = gen_reg_rtx (Pmode);
5134 rtx val = force_operand (XEXP (x, 0), temp);
5135 if (val != temp)
5136 emit_move_insn (temp, val);
5138 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5142 if (constant_term != const0_rtx)
5143 x = gen_rtx_PLUS (Pmode, x, constant_term);
5145 return x;
5148 /* Try a machine-dependent way of reloading an illegitimate address AD
5149 operand. If we find one, push the reload and return the new address.
5151 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5152 and TYPE is the reload type of the current reload. */
5155 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5156 int opnum, int type)
5158 if (!optimize || TARGET_LONG_DISPLACEMENT)
5159 return NULL_RTX;
5161 if (GET_CODE (ad) == PLUS)
5163 rtx tem = simplify_binary_operation (PLUS, Pmode,
5164 XEXP (ad, 0), XEXP (ad, 1));
5165 if (tem)
5166 ad = tem;
5169 if (GET_CODE (ad) == PLUS
5170 && GET_CODE (XEXP (ad, 0)) == REG
5171 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5172 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5174 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5175 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5176 rtx cst, tem, new_rtx;
5178 cst = GEN_INT (upper);
5179 if (!legitimate_reload_constant_p (cst))
5180 cst = force_const_mem (Pmode, cst);
5182 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5183 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5185 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5186 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5187 opnum, (enum reload_type) type);
5188 return new_rtx;
5191 return NULL_RTX;
5194 /* Emit code to move LEN bytes from DST to SRC. */
5196 bool
5197 s390_expand_movmem (rtx dst, rtx src, rtx len)
5199 /* When tuning for z10 or higher we rely on the Glibc functions to
5200 do the right thing. Only for constant lengths below 64k we will
5201 generate inline code. */
5202 if (s390_tune >= PROCESSOR_2097_Z10
5203 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5204 return false;
5206 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5208 if (INTVAL (len) > 0)
5209 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5212 else if (TARGET_MVCLE)
5214 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5217 else
5219 rtx dst_addr, src_addr, count, blocks, temp;
5220 rtx_code_label *loop_start_label = gen_label_rtx ();
5221 rtx_code_label *loop_end_label = gen_label_rtx ();
5222 rtx_code_label *end_label = gen_label_rtx ();
5223 machine_mode mode;
5225 mode = GET_MODE (len);
5226 if (mode == VOIDmode)
5227 mode = Pmode;
5229 dst_addr = gen_reg_rtx (Pmode);
5230 src_addr = gen_reg_rtx (Pmode);
5231 count = gen_reg_rtx (mode);
5232 blocks = gen_reg_rtx (mode);
5234 convert_move (count, len, 1);
5235 emit_cmp_and_jump_insns (count, const0_rtx,
5236 EQ, NULL_RTX, mode, 1, end_label);
5238 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5239 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5240 dst = change_address (dst, VOIDmode, dst_addr);
5241 src = change_address (src, VOIDmode, src_addr);
5243 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5244 OPTAB_DIRECT);
5245 if (temp != count)
5246 emit_move_insn (count, temp);
5248 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5249 OPTAB_DIRECT);
5250 if (temp != blocks)
5251 emit_move_insn (blocks, temp);
5253 emit_cmp_and_jump_insns (blocks, const0_rtx,
5254 EQ, NULL_RTX, mode, 1, loop_end_label);
5256 emit_label (loop_start_label);
5258 if (TARGET_Z10
5259 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5261 rtx prefetch;
5263 /* Issue a read prefetch for the +3 cache line. */
5264 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5265 const0_rtx, const0_rtx);
5266 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5267 emit_insn (prefetch);
5269 /* Issue a write prefetch for the +3 cache line. */
5270 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5271 const1_rtx, const0_rtx);
5272 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5273 emit_insn (prefetch);
5276 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5277 s390_load_address (dst_addr,
5278 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5279 s390_load_address (src_addr,
5280 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5282 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5283 OPTAB_DIRECT);
5284 if (temp != blocks)
5285 emit_move_insn (blocks, temp);
5287 emit_cmp_and_jump_insns (blocks, const0_rtx,
5288 EQ, NULL_RTX, mode, 1, loop_end_label);
5290 emit_jump (loop_start_label);
5291 emit_label (loop_end_label);
5293 emit_insn (gen_movmem_short (dst, src,
5294 convert_to_mode (Pmode, count, 1)));
5295 emit_label (end_label);
5297 return true;
5300 /* Emit code to set LEN bytes at DST to VAL.
5301 Make use of clrmem if VAL is zero. */
5303 void
5304 s390_expand_setmem (rtx dst, rtx len, rtx val)
5306 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5307 return;
5309 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5311 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5313 if (val == const0_rtx && INTVAL (len) <= 256)
5314 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5315 else
5317 /* Initialize memory by storing the first byte. */
5318 emit_move_insn (adjust_address (dst, QImode, 0), val);
5320 if (INTVAL (len) > 1)
5322 /* Initiate 1 byte overlap move.
5323 The first byte of DST is propagated through DSTP1.
5324 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5325 DST is set to size 1 so the rest of the memory location
5326 does not count as source operand. */
5327 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5328 set_mem_size (dst, 1);
5330 emit_insn (gen_movmem_short (dstp1, dst,
5331 GEN_INT (INTVAL (len) - 2)));
5336 else if (TARGET_MVCLE)
5338 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5339 if (TARGET_64BIT)
5340 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5341 val));
5342 else
5343 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5344 val));
5347 else
5349 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5350 rtx_code_label *loop_start_label = gen_label_rtx ();
5351 rtx_code_label *loop_end_label = gen_label_rtx ();
5352 rtx_code_label *end_label = gen_label_rtx ();
5353 machine_mode mode;
5355 mode = GET_MODE (len);
5356 if (mode == VOIDmode)
5357 mode = Pmode;
5359 dst_addr = gen_reg_rtx (Pmode);
5360 count = gen_reg_rtx (mode);
5361 blocks = gen_reg_rtx (mode);
5363 convert_move (count, len, 1);
5364 emit_cmp_and_jump_insns (count, const0_rtx,
5365 EQ, NULL_RTX, mode, 1, end_label);
5367 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5368 dst = change_address (dst, VOIDmode, dst_addr);
5370 if (val == const0_rtx)
5371 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5372 OPTAB_DIRECT);
5373 else
5375 dstp1 = adjust_address (dst, VOIDmode, 1);
5376 set_mem_size (dst, 1);
5378 /* Initialize memory by storing the first byte. */
5379 emit_move_insn (adjust_address (dst, QImode, 0), val);
5381 /* If count is 1 we are done. */
5382 emit_cmp_and_jump_insns (count, const1_rtx,
5383 EQ, NULL_RTX, mode, 1, end_label);
5385 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5386 OPTAB_DIRECT);
5388 if (temp != count)
5389 emit_move_insn (count, temp);
5391 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5392 OPTAB_DIRECT);
5393 if (temp != blocks)
5394 emit_move_insn (blocks, temp);
5396 emit_cmp_and_jump_insns (blocks, const0_rtx,
5397 EQ, NULL_RTX, mode, 1, loop_end_label);
5399 emit_label (loop_start_label);
5401 if (TARGET_Z10
5402 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5404 /* Issue a write prefetch for the +4 cache line. */
5405 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5406 GEN_INT (1024)),
5407 const1_rtx, const0_rtx);
5408 emit_insn (prefetch);
5409 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5412 if (val == const0_rtx)
5413 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5414 else
5415 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5416 s390_load_address (dst_addr,
5417 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5419 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5420 OPTAB_DIRECT);
5421 if (temp != blocks)
5422 emit_move_insn (blocks, temp);
5424 emit_cmp_and_jump_insns (blocks, const0_rtx,
5425 EQ, NULL_RTX, mode, 1, loop_end_label);
5427 emit_jump (loop_start_label);
5428 emit_label (loop_end_label);
5430 if (val == const0_rtx)
5431 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5432 else
5433 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5434 emit_label (end_label);
5438 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5439 and return the result in TARGET. */
5441 bool
5442 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5444 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5445 rtx tmp;
5447 /* When tuning for z10 or higher we rely on the Glibc functions to
5448 do the right thing. Only for constant lengths below 64k we will
5449 generate inline code. */
5450 if (s390_tune >= PROCESSOR_2097_Z10
5451 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5452 return false;
5454 /* As the result of CMPINT is inverted compared to what we need,
5455 we have to swap the operands. */
5456 tmp = op0; op0 = op1; op1 = tmp;
5458 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5460 if (INTVAL (len) > 0)
5462 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5463 emit_insn (gen_cmpint (target, ccreg));
5465 else
5466 emit_move_insn (target, const0_rtx);
5468 else if (TARGET_MVCLE)
5470 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5471 emit_insn (gen_cmpint (target, ccreg));
5473 else
5475 rtx addr0, addr1, count, blocks, temp;
5476 rtx_code_label *loop_start_label = gen_label_rtx ();
5477 rtx_code_label *loop_end_label = gen_label_rtx ();
5478 rtx_code_label *end_label = gen_label_rtx ();
5479 machine_mode mode;
5481 mode = GET_MODE (len);
5482 if (mode == VOIDmode)
5483 mode = Pmode;
5485 addr0 = gen_reg_rtx (Pmode);
5486 addr1 = gen_reg_rtx (Pmode);
5487 count = gen_reg_rtx (mode);
5488 blocks = gen_reg_rtx (mode);
5490 convert_move (count, len, 1);
5491 emit_cmp_and_jump_insns (count, const0_rtx,
5492 EQ, NULL_RTX, mode, 1, end_label);
5494 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5495 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5496 op0 = change_address (op0, VOIDmode, addr0);
5497 op1 = change_address (op1, VOIDmode, addr1);
5499 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5500 OPTAB_DIRECT);
5501 if (temp != count)
5502 emit_move_insn (count, temp);
5504 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5505 OPTAB_DIRECT);
5506 if (temp != blocks)
5507 emit_move_insn (blocks, temp);
5509 emit_cmp_and_jump_insns (blocks, const0_rtx,
5510 EQ, NULL_RTX, mode, 1, loop_end_label);
5512 emit_label (loop_start_label);
5514 if (TARGET_Z10
5515 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5517 rtx prefetch;
5519 /* Issue a read prefetch for the +2 cache line of operand 1. */
5520 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5521 const0_rtx, const0_rtx);
5522 emit_insn (prefetch);
5523 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5525 /* Issue a read prefetch for the +2 cache line of operand 2. */
5526 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5527 const0_rtx, const0_rtx);
5528 emit_insn (prefetch);
5529 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5532 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5533 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5534 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5535 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5536 temp = gen_rtx_SET (pc_rtx, temp);
5537 emit_jump_insn (temp);
5539 s390_load_address (addr0,
5540 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5541 s390_load_address (addr1,
5542 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5544 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5545 OPTAB_DIRECT);
5546 if (temp != blocks)
5547 emit_move_insn (blocks, temp);
5549 emit_cmp_and_jump_insns (blocks, const0_rtx,
5550 EQ, NULL_RTX, mode, 1, loop_end_label);
5552 emit_jump (loop_start_label);
5553 emit_label (loop_end_label);
5555 emit_insn (gen_cmpmem_short (op0, op1,
5556 convert_to_mode (Pmode, count, 1)));
5557 emit_label (end_label);
5559 emit_insn (gen_cmpint (target, ccreg));
5561 return true;
5564 /* Emit a conditional jump to LABEL for condition code mask MASK using
5565 comparsion operator COMPARISON. Return the emitted jump insn. */
5567 static rtx
5568 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5570 rtx temp;
5572 gcc_assert (comparison == EQ || comparison == NE);
5573 gcc_assert (mask > 0 && mask < 15);
5575 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5576 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5577 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5578 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5579 temp = gen_rtx_SET (pc_rtx, temp);
5580 return emit_jump_insn (temp);
5583 /* Emit the instructions to implement strlen of STRING and store the
5584 result in TARGET. The string has the known ALIGNMENT. This
5585 version uses vector instructions and is therefore not appropriate
5586 for targets prior to z13. */
5588 void
5589 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5591 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5592 int very_likely = REG_BR_PROB_BASE - 1;
5593 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5594 rtx str_reg = gen_reg_rtx (V16QImode);
5595 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5596 rtx str_idx_reg = gen_reg_rtx (Pmode);
5597 rtx result_reg = gen_reg_rtx (V16QImode);
5598 rtx is_aligned_label = gen_label_rtx ();
5599 rtx into_loop_label = NULL_RTX;
5600 rtx loop_start_label = gen_label_rtx ();
5601 rtx temp;
5602 rtx len = gen_reg_rtx (QImode);
5603 rtx cond;
5605 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5606 emit_move_insn (str_idx_reg, const0_rtx);
5608 if (INTVAL (alignment) < 16)
5610 /* Check whether the address happens to be aligned properly so
5611 jump directly to the aligned loop. */
5612 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5613 str_addr_base_reg, GEN_INT (15)),
5614 const0_rtx, EQ, NULL_RTX,
5615 Pmode, 1, is_aligned_label);
5617 temp = gen_reg_rtx (Pmode);
5618 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5619 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5620 gcc_assert (REG_P (temp));
5621 highest_index_to_load_reg =
5622 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5623 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5624 gcc_assert (REG_P (highest_index_to_load_reg));
5625 emit_insn (gen_vllv16qi (str_reg,
5626 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5627 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5629 into_loop_label = gen_label_rtx ();
5630 s390_emit_jump (into_loop_label, NULL_RTX);
5631 emit_barrier ();
5634 emit_label (is_aligned_label);
5635 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5637 /* Reaching this point we are only performing 16 bytes aligned
5638 loads. */
5639 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5641 emit_label (loop_start_label);
5642 LABEL_NUSES (loop_start_label) = 1;
5644 /* Load 16 bytes of the string into VR. */
5645 emit_move_insn (str_reg,
5646 gen_rtx_MEM (V16QImode,
5647 gen_rtx_PLUS (Pmode, str_idx_reg,
5648 str_addr_base_reg)));
5649 if (into_loop_label != NULL_RTX)
5651 emit_label (into_loop_label);
5652 LABEL_NUSES (into_loop_label) = 1;
5655 /* Increment string index by 16 bytes. */
5656 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5657 str_idx_reg, 1, OPTAB_DIRECT);
5659 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5660 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5662 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5663 REG_BR_PROB, very_likely);
5664 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5666 /* If the string pointer wasn't aligned we have loaded less then 16
5667 bytes and the remaining bytes got filled with zeros (by vll).
5668 Now we have to check whether the resulting index lies within the
5669 bytes actually part of the string. */
5671 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5672 highest_index_to_load_reg);
5673 s390_load_address (highest_index_to_load_reg,
5674 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5675 const1_rtx));
5676 if (TARGET_64BIT)
5677 emit_insn (gen_movdicc (str_idx_reg, cond,
5678 highest_index_to_load_reg, str_idx_reg));
5679 else
5680 emit_insn (gen_movsicc (str_idx_reg, cond,
5681 highest_index_to_load_reg, str_idx_reg));
5683 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5684 very_unlikely);
5686 expand_binop (Pmode, add_optab, str_idx_reg,
5687 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5688 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5689 here. */
5690 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5691 convert_to_mode (Pmode, len, 1),
5692 target, 1, OPTAB_DIRECT);
5693 if (temp != target)
5694 emit_move_insn (target, temp);
5697 void
5698 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5700 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5701 rtx temp = gen_reg_rtx (Pmode);
5702 rtx src_addr = XEXP (src, 0);
5703 rtx dst_addr = XEXP (dst, 0);
5704 rtx src_addr_reg = gen_reg_rtx (Pmode);
5705 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5706 rtx offset = gen_reg_rtx (Pmode);
5707 rtx vsrc = gen_reg_rtx (V16QImode);
5708 rtx vpos = gen_reg_rtx (V16QImode);
5709 rtx loadlen = gen_reg_rtx (SImode);
5710 rtx gpos_qi = gen_reg_rtx(QImode);
5711 rtx gpos = gen_reg_rtx (SImode);
5712 rtx done_label = gen_label_rtx ();
5713 rtx loop_label = gen_label_rtx ();
5714 rtx exit_label = gen_label_rtx ();
5715 rtx full_label = gen_label_rtx ();
5717 /* Perform a quick check for string ending on the first up to 16
5718 bytes and exit early if successful. */
5720 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5721 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5722 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5723 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5724 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5725 /* gpos is the byte index if a zero was found and 16 otherwise.
5726 So if it is lower than the loaded bytes we have a hit. */
5727 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5728 full_label);
5729 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5731 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5732 1, OPTAB_DIRECT);
5733 emit_jump (exit_label);
5734 emit_barrier ();
5736 emit_label (full_label);
5737 LABEL_NUSES (full_label) = 1;
5739 /* Calculate `offset' so that src + offset points to the last byte
5740 before 16 byte alignment. */
5742 /* temp = src_addr & 0xf */
5743 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5744 1, OPTAB_DIRECT);
5746 /* offset = 0xf - temp */
5747 emit_move_insn (offset, GEN_INT (15));
5748 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5749 1, OPTAB_DIRECT);
5751 /* Store `offset' bytes in the dstination string. The quick check
5752 has loaded at least `offset' bytes into vsrc. */
5754 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5756 /* Advance to the next byte to be loaded. */
5757 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5758 1, OPTAB_DIRECT);
5760 /* Make sure the addresses are single regs which can be used as a
5761 base. */
5762 emit_move_insn (src_addr_reg, src_addr);
5763 emit_move_insn (dst_addr_reg, dst_addr);
5765 /* MAIN LOOP */
5767 emit_label (loop_label);
5768 LABEL_NUSES (loop_label) = 1;
5770 emit_move_insn (vsrc,
5771 gen_rtx_MEM (V16QImode,
5772 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5774 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5775 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5776 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5777 REG_BR_PROB, very_unlikely);
5779 emit_move_insn (gen_rtx_MEM (V16QImode,
5780 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5781 vsrc);
5782 /* offset += 16 */
5783 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5784 offset, 1, OPTAB_DIRECT);
5786 emit_jump (loop_label);
5787 emit_barrier ();
5789 /* REGULAR EXIT */
5791 /* We are done. Add the offset of the zero character to the dst_addr
5792 pointer to get the result. */
5794 emit_label (done_label);
5795 LABEL_NUSES (done_label) = 1;
5797 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5798 1, OPTAB_DIRECT);
5800 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5801 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5803 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5805 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5806 1, OPTAB_DIRECT);
5808 /* EARLY EXIT */
5810 emit_label (exit_label);
5811 LABEL_NUSES (exit_label) = 1;
5815 /* Expand conditional increment or decrement using alc/slb instructions.
5816 Should generate code setting DST to either SRC or SRC + INCREMENT,
5817 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5818 Returns true if successful, false otherwise.
5820 That makes it possible to implement some if-constructs without jumps e.g.:
5821 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5822 unsigned int a, b, c;
5823 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5824 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5825 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5826 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5828 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5829 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5830 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5831 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5832 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5834 bool
5835 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5836 rtx dst, rtx src, rtx increment)
5838 machine_mode cmp_mode;
5839 machine_mode cc_mode;
5840 rtx op_res;
5841 rtx insn;
5842 rtvec p;
5843 int ret;
5845 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5846 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5847 cmp_mode = SImode;
5848 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5849 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5850 cmp_mode = DImode;
5851 else
5852 return false;
5854 /* Try ADD LOGICAL WITH CARRY. */
5855 if (increment == const1_rtx)
5857 /* Determine CC mode to use. */
5858 if (cmp_code == EQ || cmp_code == NE)
5860 if (cmp_op1 != const0_rtx)
5862 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5863 NULL_RTX, 0, OPTAB_WIDEN);
5864 cmp_op1 = const0_rtx;
5867 cmp_code = cmp_code == EQ ? LEU : GTU;
5870 if (cmp_code == LTU || cmp_code == LEU)
5872 rtx tem = cmp_op0;
5873 cmp_op0 = cmp_op1;
5874 cmp_op1 = tem;
5875 cmp_code = swap_condition (cmp_code);
5878 switch (cmp_code)
5880 case GTU:
5881 cc_mode = CCUmode;
5882 break;
5884 case GEU:
5885 cc_mode = CCL3mode;
5886 break;
5888 default:
5889 return false;
5892 /* Emit comparison instruction pattern. */
5893 if (!register_operand (cmp_op0, cmp_mode))
5894 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5896 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5897 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5898 /* We use insn_invalid_p here to add clobbers if required. */
5899 ret = insn_invalid_p (emit_insn (insn), false);
5900 gcc_assert (!ret);
5902 /* Emit ALC instruction pattern. */
5903 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5904 gen_rtx_REG (cc_mode, CC_REGNUM),
5905 const0_rtx);
5907 if (src != const0_rtx)
5909 if (!register_operand (src, GET_MODE (dst)))
5910 src = force_reg (GET_MODE (dst), src);
5912 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5913 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5916 p = rtvec_alloc (2);
5917 RTVEC_ELT (p, 0) =
5918 gen_rtx_SET (dst, op_res);
5919 RTVEC_ELT (p, 1) =
5920 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5921 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5923 return true;
5926 /* Try SUBTRACT LOGICAL WITH BORROW. */
5927 if (increment == constm1_rtx)
5929 /* Determine CC mode to use. */
5930 if (cmp_code == EQ || cmp_code == NE)
5932 if (cmp_op1 != const0_rtx)
5934 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5935 NULL_RTX, 0, OPTAB_WIDEN);
5936 cmp_op1 = const0_rtx;
5939 cmp_code = cmp_code == EQ ? LEU : GTU;
5942 if (cmp_code == GTU || cmp_code == GEU)
5944 rtx tem = cmp_op0;
5945 cmp_op0 = cmp_op1;
5946 cmp_op1 = tem;
5947 cmp_code = swap_condition (cmp_code);
5950 switch (cmp_code)
5952 case LEU:
5953 cc_mode = CCUmode;
5954 break;
5956 case LTU:
5957 cc_mode = CCL3mode;
5958 break;
5960 default:
5961 return false;
5964 /* Emit comparison instruction pattern. */
5965 if (!register_operand (cmp_op0, cmp_mode))
5966 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5968 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5969 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5970 /* We use insn_invalid_p here to add clobbers if required. */
5971 ret = insn_invalid_p (emit_insn (insn), false);
5972 gcc_assert (!ret);
5974 /* Emit SLB instruction pattern. */
5975 if (!register_operand (src, GET_MODE (dst)))
5976 src = force_reg (GET_MODE (dst), src);
5978 op_res = gen_rtx_MINUS (GET_MODE (dst),
5979 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5980 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5981 gen_rtx_REG (cc_mode, CC_REGNUM),
5982 const0_rtx));
5983 p = rtvec_alloc (2);
5984 RTVEC_ELT (p, 0) =
5985 gen_rtx_SET (dst, op_res);
5986 RTVEC_ELT (p, 1) =
5987 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5988 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5990 return true;
5993 return false;
5996 /* Expand code for the insv template. Return true if successful. */
5998 bool
5999 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6001 int bitsize = INTVAL (op1);
6002 int bitpos = INTVAL (op2);
6003 machine_mode mode = GET_MODE (dest);
6004 machine_mode smode;
6005 int smode_bsize, mode_bsize;
6006 rtx op, clobber;
6008 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6009 return false;
6011 /* Generate INSERT IMMEDIATE (IILL et al). */
6012 /* (set (ze (reg)) (const_int)). */
6013 if (TARGET_ZARCH
6014 && register_operand (dest, word_mode)
6015 && (bitpos % 16) == 0
6016 && (bitsize % 16) == 0
6017 && const_int_operand (src, VOIDmode))
6019 HOST_WIDE_INT val = INTVAL (src);
6020 int regpos = bitpos + bitsize;
6022 while (regpos > bitpos)
6024 machine_mode putmode;
6025 int putsize;
6027 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6028 putmode = SImode;
6029 else
6030 putmode = HImode;
6032 putsize = GET_MODE_BITSIZE (putmode);
6033 regpos -= putsize;
6034 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6035 GEN_INT (putsize),
6036 GEN_INT (regpos)),
6037 gen_int_mode (val, putmode));
6038 val >>= putsize;
6040 gcc_assert (regpos == bitpos);
6041 return true;
6044 smode = smallest_mode_for_size (bitsize, MODE_INT);
6045 smode_bsize = GET_MODE_BITSIZE (smode);
6046 mode_bsize = GET_MODE_BITSIZE (mode);
6048 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6049 if (bitpos == 0
6050 && (bitsize % BITS_PER_UNIT) == 0
6051 && MEM_P (dest)
6052 && (register_operand (src, word_mode)
6053 || const_int_operand (src, VOIDmode)))
6055 /* Emit standard pattern if possible. */
6056 if (smode_bsize == bitsize)
6058 emit_move_insn (adjust_address (dest, smode, 0),
6059 gen_lowpart (smode, src));
6060 return true;
6063 /* (set (ze (mem)) (const_int)). */
6064 else if (const_int_operand (src, VOIDmode))
6066 int size = bitsize / BITS_PER_UNIT;
6067 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6068 BLKmode,
6069 UNITS_PER_WORD - size);
6071 dest = adjust_address (dest, BLKmode, 0);
6072 set_mem_size (dest, size);
6073 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6074 return true;
6077 /* (set (ze (mem)) (reg)). */
6078 else if (register_operand (src, word_mode))
6080 if (bitsize <= 32)
6081 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6082 const0_rtx), src);
6083 else
6085 /* Emit st,stcmh sequence. */
6086 int stcmh_width = bitsize - 32;
6087 int size = stcmh_width / BITS_PER_UNIT;
6089 emit_move_insn (adjust_address (dest, SImode, size),
6090 gen_lowpart (SImode, src));
6091 set_mem_size (dest, size);
6092 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6093 GEN_INT (stcmh_width),
6094 const0_rtx),
6095 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6097 return true;
6101 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6102 if ((bitpos % BITS_PER_UNIT) == 0
6103 && (bitsize % BITS_PER_UNIT) == 0
6104 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6105 && MEM_P (src)
6106 && (mode == DImode || mode == SImode)
6107 && register_operand (dest, mode))
6109 /* Emit a strict_low_part pattern if possible. */
6110 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6112 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6113 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6114 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6115 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6116 return true;
6119 /* ??? There are more powerful versions of ICM that are not
6120 completely represented in the md file. */
6123 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6124 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6126 machine_mode mode_s = GET_MODE (src);
6128 if (CONSTANT_P (src))
6130 /* For constant zero values the representation with AND
6131 appears to be folded in more situations than the (set
6132 (zero_extract) ...).
6133 We only do this when the start and end of the bitfield
6134 remain in the same SImode chunk. That way nihf or nilf
6135 can be used.
6136 The AND patterns might still generate a risbg for this. */
6137 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6138 return false;
6139 else
6140 src = force_reg (mode, src);
6142 else if (mode_s != mode)
6144 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6145 src = force_reg (mode_s, src);
6146 src = gen_lowpart (mode, src);
6149 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6150 op = gen_rtx_SET (op, src);
6152 if (!TARGET_ZEC12)
6154 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6155 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6157 emit_insn (op);
6159 return true;
6162 return false;
6165 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6166 register that holds VAL of mode MODE shifted by COUNT bits. */
6168 static inline rtx
6169 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6171 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6172 NULL_RTX, 1, OPTAB_DIRECT);
6173 return expand_simple_binop (SImode, ASHIFT, val, count,
6174 NULL_RTX, 1, OPTAB_DIRECT);
6177 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6178 the result in TARGET. */
6180 void
6181 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6182 rtx cmp_op1, rtx cmp_op2)
6184 machine_mode mode = GET_MODE (target);
6185 bool neg_p = false, swap_p = false;
6186 rtx tmp;
6188 if (GET_MODE (cmp_op1) == V2DFmode)
6190 switch (cond)
6192 /* NE a != b -> !(a == b) */
6193 case NE: cond = EQ; neg_p = true; break;
6194 /* UNGT a u> b -> !(b >= a) */
6195 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6196 /* UNGE a u>= b -> !(b > a) */
6197 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6198 /* LE: a <= b -> b >= a */
6199 case LE: cond = GE; swap_p = true; break;
6200 /* UNLE: a u<= b -> !(a > b) */
6201 case UNLE: cond = GT; neg_p = true; break;
6202 /* LT: a < b -> b > a */
6203 case LT: cond = GT; swap_p = true; break;
6204 /* UNLT: a u< b -> !(a >= b) */
6205 case UNLT: cond = GE; neg_p = true; break;
6206 case UNEQ:
6207 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6208 return;
6209 case LTGT:
6210 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6211 return;
6212 case ORDERED:
6213 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6214 return;
6215 case UNORDERED:
6216 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6217 return;
6218 default: break;
6221 else
6223 switch (cond)
6225 /* NE: a != b -> !(a == b) */
6226 case NE: cond = EQ; neg_p = true; break;
6227 /* GE: a >= b -> !(b > a) */
6228 case GE: cond = GT; neg_p = true; swap_p = true; break;
6229 /* GEU: a >= b -> !(b > a) */
6230 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6231 /* LE: a <= b -> !(a > b) */
6232 case LE: cond = GT; neg_p = true; break;
6233 /* LEU: a <= b -> !(a > b) */
6234 case LEU: cond = GTU; neg_p = true; break;
6235 /* LT: a < b -> b > a */
6236 case LT: cond = GT; swap_p = true; break;
6237 /* LTU: a < b -> b > a */
6238 case LTU: cond = GTU; swap_p = true; break;
6239 default: break;
6243 if (swap_p)
6245 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6248 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6249 mode,
6250 cmp_op1, cmp_op2)));
6251 if (neg_p)
6252 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6255 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6256 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6257 elements in CMP1 and CMP2 fulfill the comparison. */
6258 void
6259 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6260 rtx cmp1, rtx cmp2, bool all_p)
6262 enum rtx_code new_code = code;
6263 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
6264 rtx tmp_reg = gen_reg_rtx (SImode);
6265 bool swap_p = false;
6267 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6269 switch (code)
6271 case EQ: cmp_mode = CCVEQmode; break;
6272 case NE: cmp_mode = CCVEQmode; break;
6273 case GT: cmp_mode = CCVHmode; break;
6274 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
6275 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
6276 case LE: cmp_mode = CCVHmode; new_code = LE; break;
6277 case GTU: cmp_mode = CCVHUmode; break;
6278 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
6279 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
6280 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
6281 default: gcc_unreachable ();
6283 scratch_mode = GET_MODE (cmp1);
6285 else if (GET_MODE (cmp1) == V2DFmode)
6287 switch (code)
6289 case EQ: cmp_mode = CCVEQmode; break;
6290 case NE: cmp_mode = CCVEQmode; break;
6291 case GT: cmp_mode = CCVFHmode; break;
6292 case GE: cmp_mode = CCVFHEmode; break;
6293 case UNLE: cmp_mode = CCVFHmode; break;
6294 case UNLT: cmp_mode = CCVFHEmode; break;
6295 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6296 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6297 default: gcc_unreachable ();
6299 scratch_mode = V2DImode;
6301 else
6302 gcc_unreachable ();
6304 if (!all_p)
6305 switch (cmp_mode)
6307 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6308 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6309 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6310 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6311 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6312 default: gcc_unreachable ();
6314 else
6315 /* The modes without ANY match the ALL modes. */
6316 full_cmp_mode = cmp_mode;
6318 if (swap_p)
6320 rtx tmp = cmp2;
6321 cmp2 = cmp1;
6322 cmp1 = tmp;
6325 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6326 gen_rtvec (2, gen_rtx_SET (
6327 gen_rtx_REG (cmp_mode, CC_REGNUM),
6328 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6329 gen_rtx_CLOBBER (VOIDmode,
6330 gen_rtx_SCRATCH (scratch_mode)))));
6331 emit_move_insn (target, const0_rtx);
6332 emit_move_insn (tmp_reg, const1_rtx);
6334 emit_move_insn (target,
6335 gen_rtx_IF_THEN_ELSE (SImode,
6336 gen_rtx_fmt_ee (new_code, VOIDmode,
6337 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6338 const0_rtx),
6339 target, tmp_reg));
6342 /* Generate a vector comparison expression loading either elements of
6343 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6344 and CMP_OP2. */
6346 void
6347 s390_expand_vcond (rtx target, rtx then, rtx els,
6348 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6350 rtx tmp;
6351 machine_mode result_mode;
6352 rtx result_target;
6354 machine_mode target_mode = GET_MODE (target);
6355 machine_mode cmp_mode = GET_MODE (cmp_op1);
6356 rtx op = (cond == LT) ? els : then;
6358 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6359 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6360 for short and byte (x >> 15 and x >> 7 respectively). */
6361 if ((cond == LT || cond == GE)
6362 && target_mode == cmp_mode
6363 && cmp_op2 == CONST0_RTX (cmp_mode)
6364 && op == CONST0_RTX (target_mode)
6365 && s390_vector_mode_supported_p (target_mode)
6366 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6368 rtx negop = (cond == LT) ? then : els;
6370 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6372 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6373 if (negop == CONST1_RTX (target_mode))
6375 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6376 GEN_INT (shift), target,
6377 1, OPTAB_DIRECT);
6378 if (res != target)
6379 emit_move_insn (target, res);
6380 return;
6383 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6384 else if (all_ones_operand (negop, target_mode))
6386 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6387 GEN_INT (shift), target,
6388 0, OPTAB_DIRECT);
6389 if (res != target)
6390 emit_move_insn (target, res);
6391 return;
6395 /* We always use an integral type vector to hold the comparison
6396 result. */
6397 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6398 result_target = gen_reg_rtx (result_mode);
6400 /* We allow vector immediates as comparison operands that
6401 can be handled by the optimization above but not by the
6402 following code. Hence, force them into registers here. */
6403 if (!REG_P (cmp_op1))
6404 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6406 if (!REG_P (cmp_op2))
6407 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6409 s390_expand_vec_compare (result_target, cond,
6410 cmp_op1, cmp_op2);
6412 /* If the results are supposed to be either -1 or 0 we are done
6413 since this is what our compare instructions generate anyway. */
6414 if (all_ones_operand (then, GET_MODE (then))
6415 && const0_operand (els, GET_MODE (els)))
6417 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6418 result_target, 0));
6419 return;
6422 /* Otherwise we will do a vsel afterwards. */
6423 /* This gets triggered e.g.
6424 with gcc.c-torture/compile/pr53410-1.c */
6425 if (!REG_P (then))
6426 then = force_reg (target_mode, then);
6428 if (!REG_P (els))
6429 els = force_reg (target_mode, els);
6431 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6432 result_target,
6433 CONST0_RTX (result_mode));
6435 /* We compared the result against zero above so we have to swap then
6436 and els here. */
6437 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6439 gcc_assert (target_mode == GET_MODE (then));
6440 emit_insn (gen_rtx_SET (target, tmp));
6443 /* Emit the RTX necessary to initialize the vector TARGET with values
6444 in VALS. */
6445 void
6446 s390_expand_vec_init (rtx target, rtx vals)
6448 machine_mode mode = GET_MODE (target);
6449 machine_mode inner_mode = GET_MODE_INNER (mode);
6450 int n_elts = GET_MODE_NUNITS (mode);
6451 bool all_same = true, all_regs = true, all_const_int = true;
6452 rtx x;
6453 int i;
6455 for (i = 0; i < n_elts; ++i)
6457 x = XVECEXP (vals, 0, i);
6459 if (!CONST_INT_P (x))
6460 all_const_int = false;
6462 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6463 all_same = false;
6465 if (!REG_P (x))
6466 all_regs = false;
6469 /* Use vector gen mask or vector gen byte mask if possible. */
6470 if (all_same && all_const_int
6471 && (XVECEXP (vals, 0, 0) == const0_rtx
6472 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6473 NULL, NULL)
6474 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6476 emit_insn (gen_rtx_SET (target,
6477 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6478 return;
6481 if (all_same)
6483 emit_insn (gen_rtx_SET (target,
6484 gen_rtx_VEC_DUPLICATE (mode,
6485 XVECEXP (vals, 0, 0))));
6486 return;
6489 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6491 /* Use vector load pair. */
6492 emit_insn (gen_rtx_SET (target,
6493 gen_rtx_VEC_CONCAT (mode,
6494 XVECEXP (vals, 0, 0),
6495 XVECEXP (vals, 0, 1))));
6496 return;
6499 /* We are about to set the vector elements one by one. Zero out the
6500 full register first in order to help the data flow framework to
6501 detect it as full VR set. */
6502 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6504 /* Unfortunately the vec_init expander is not allowed to fail. So
6505 we have to implement the fallback ourselves. */
6506 for (i = 0; i < n_elts; i++)
6508 rtx elem = XVECEXP (vals, 0, i);
6509 if (!general_operand (elem, GET_MODE (elem)))
6510 elem = force_reg (inner_mode, elem);
6512 emit_insn (gen_rtx_SET (target,
6513 gen_rtx_UNSPEC (mode,
6514 gen_rtvec (3, elem,
6515 GEN_INT (i), target),
6516 UNSPEC_VEC_SET)));
6520 /* Structure to hold the initial parameters for a compare_and_swap operation
6521 in HImode and QImode. */
6523 struct alignment_context
6525 rtx memsi; /* SI aligned memory location. */
6526 rtx shift; /* Bit offset with regard to lsb. */
6527 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6528 rtx modemaski; /* ~modemask */
6529 bool aligned; /* True if memory is aligned, false else. */
6532 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6533 structure AC for transparent simplifying, if the memory alignment is known
6534 to be at least 32bit. MEM is the memory location for the actual operation
6535 and MODE its mode. */
6537 static void
6538 init_alignment_context (struct alignment_context *ac, rtx mem,
6539 machine_mode mode)
6541 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6542 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6544 if (ac->aligned)
6545 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6546 else
6548 /* Alignment is unknown. */
6549 rtx byteoffset, addr, align;
6551 /* Force the address into a register. */
6552 addr = force_reg (Pmode, XEXP (mem, 0));
6554 /* Align it to SImode. */
6555 align = expand_simple_binop (Pmode, AND, addr,
6556 GEN_INT (-GET_MODE_SIZE (SImode)),
6557 NULL_RTX, 1, OPTAB_DIRECT);
6558 /* Generate MEM. */
6559 ac->memsi = gen_rtx_MEM (SImode, align);
6560 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6561 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6562 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6564 /* Calculate shiftcount. */
6565 byteoffset = expand_simple_binop (Pmode, AND, addr,
6566 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6567 NULL_RTX, 1, OPTAB_DIRECT);
6568 /* As we already have some offset, evaluate the remaining distance. */
6569 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6570 NULL_RTX, 1, OPTAB_DIRECT);
6573 /* Shift is the byte count, but we need the bitcount. */
6574 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6575 NULL_RTX, 1, OPTAB_DIRECT);
6577 /* Calculate masks. */
6578 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6579 GEN_INT (GET_MODE_MASK (mode)),
6580 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6581 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6582 NULL_RTX, 1);
6585 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6586 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6587 perform the merge in SEQ2. */
6589 static rtx
6590 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6591 machine_mode mode, rtx val, rtx ins)
6593 rtx tmp;
6595 if (ac->aligned)
6597 start_sequence ();
6598 tmp = copy_to_mode_reg (SImode, val);
6599 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6600 const0_rtx, ins))
6602 *seq1 = NULL;
6603 *seq2 = get_insns ();
6604 end_sequence ();
6605 return tmp;
6607 end_sequence ();
6610 /* Failed to use insv. Generate a two part shift and mask. */
6611 start_sequence ();
6612 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6613 *seq1 = get_insns ();
6614 end_sequence ();
6616 start_sequence ();
6617 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6618 *seq2 = get_insns ();
6619 end_sequence ();
6621 return tmp;
6624 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6625 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6626 value to set if CMP == MEM. */
6628 void
6629 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6630 rtx cmp, rtx new_rtx, bool is_weak)
6632 struct alignment_context ac;
6633 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6634 rtx res = gen_reg_rtx (SImode);
6635 rtx_code_label *csloop = NULL, *csend = NULL;
6637 gcc_assert (MEM_P (mem));
6639 init_alignment_context (&ac, mem, mode);
6641 /* Load full word. Subsequent loads are performed by CS. */
6642 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6643 NULL_RTX, 1, OPTAB_DIRECT);
6645 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6646 possible, we try to use insv to make this happen efficiently. If
6647 that fails we'll generate code both inside and outside the loop. */
6648 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6649 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6651 if (seq0)
6652 emit_insn (seq0);
6653 if (seq1)
6654 emit_insn (seq1);
6656 /* Start CS loop. */
6657 if (!is_weak)
6659 /* Begin assuming success. */
6660 emit_move_insn (btarget, const1_rtx);
6662 csloop = gen_label_rtx ();
6663 csend = gen_label_rtx ();
6664 emit_label (csloop);
6667 /* val = "<mem>00..0<mem>"
6668 * cmp = "00..0<cmp>00..0"
6669 * new = "00..0<new>00..0"
6672 emit_insn (seq2);
6673 emit_insn (seq3);
6675 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6676 if (is_weak)
6677 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6678 else
6680 rtx tmp;
6682 /* Jump to end if we're done (likely?). */
6683 s390_emit_jump (csend, cc);
6685 /* Check for changes outside mode, and loop internal if so.
6686 Arrange the moves so that the compare is adjacent to the
6687 branch so that we can generate CRJ. */
6688 tmp = copy_to_reg (val);
6689 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6690 1, OPTAB_DIRECT);
6691 cc = s390_emit_compare (NE, val, tmp);
6692 s390_emit_jump (csloop, cc);
6694 /* Failed. */
6695 emit_move_insn (btarget, const0_rtx);
6696 emit_label (csend);
6699 /* Return the correct part of the bitfield. */
6700 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6701 NULL_RTX, 1, OPTAB_DIRECT), 1);
6704 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6705 and VAL the value to play with. If AFTER is true then store the value
6706 MEM holds after the operation, if AFTER is false then store the value MEM
6707 holds before the operation. If TARGET is zero then discard that value, else
6708 store it to TARGET. */
6710 void
6711 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6712 rtx target, rtx mem, rtx val, bool after)
6714 struct alignment_context ac;
6715 rtx cmp;
6716 rtx new_rtx = gen_reg_rtx (SImode);
6717 rtx orig = gen_reg_rtx (SImode);
6718 rtx_code_label *csloop = gen_label_rtx ();
6720 gcc_assert (!target || register_operand (target, VOIDmode));
6721 gcc_assert (MEM_P (mem));
6723 init_alignment_context (&ac, mem, mode);
6725 /* Shift val to the correct bit positions.
6726 Preserve "icm", but prevent "ex icm". */
6727 if (!(ac.aligned && code == SET && MEM_P (val)))
6728 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6730 /* Further preparation insns. */
6731 if (code == PLUS || code == MINUS)
6732 emit_move_insn (orig, val);
6733 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6734 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6735 NULL_RTX, 1, OPTAB_DIRECT);
6737 /* Load full word. Subsequent loads are performed by CS. */
6738 cmp = force_reg (SImode, ac.memsi);
6740 /* Start CS loop. */
6741 emit_label (csloop);
6742 emit_move_insn (new_rtx, cmp);
6744 /* Patch new with val at correct position. */
6745 switch (code)
6747 case PLUS:
6748 case MINUS:
6749 val = expand_simple_binop (SImode, code, new_rtx, orig,
6750 NULL_RTX, 1, OPTAB_DIRECT);
6751 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6752 NULL_RTX, 1, OPTAB_DIRECT);
6753 /* FALLTHRU */
6754 case SET:
6755 if (ac.aligned && MEM_P (val))
6756 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6757 0, 0, SImode, val, false);
6758 else
6760 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6761 NULL_RTX, 1, OPTAB_DIRECT);
6762 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6763 NULL_RTX, 1, OPTAB_DIRECT);
6765 break;
6766 case AND:
6767 case IOR:
6768 case XOR:
6769 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6770 NULL_RTX, 1, OPTAB_DIRECT);
6771 break;
6772 case MULT: /* NAND */
6773 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6774 NULL_RTX, 1, OPTAB_DIRECT);
6775 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6776 NULL_RTX, 1, OPTAB_DIRECT);
6777 break;
6778 default:
6779 gcc_unreachable ();
6782 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6783 ac.memsi, cmp, new_rtx));
6785 /* Return the correct part of the bitfield. */
6786 if (target)
6787 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6788 after ? new_rtx : cmp, ac.shift,
6789 NULL_RTX, 1, OPTAB_DIRECT), 1);
6792 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6793 We need to emit DTP-relative relocations. */
6795 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6797 static void
6798 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6800 switch (size)
6802 case 4:
6803 fputs ("\t.long\t", file);
6804 break;
6805 case 8:
6806 fputs ("\t.quad\t", file);
6807 break;
6808 default:
6809 gcc_unreachable ();
6811 output_addr_const (file, x);
6812 fputs ("@DTPOFF", file);
6815 /* Return the proper mode for REGNO being represented in the dwarf
6816 unwind table. */
6817 machine_mode
6818 s390_dwarf_frame_reg_mode (int regno)
6820 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6822 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6823 if (GENERAL_REGNO_P (regno))
6824 save_mode = Pmode;
6826 /* The rightmost 64 bits of vector registers are call-clobbered. */
6827 if (GET_MODE_SIZE (save_mode) > 8)
6828 save_mode = DImode;
6830 return save_mode;
6833 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6834 /* Implement TARGET_MANGLE_TYPE. */
6836 static const char *
6837 s390_mangle_type (const_tree type)
6839 type = TYPE_MAIN_VARIANT (type);
6841 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6842 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6843 return NULL;
6845 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6846 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6847 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6848 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6850 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6851 && TARGET_LONG_DOUBLE_128)
6852 return "g";
6854 /* For all other types, use normal C++ mangling. */
6855 return NULL;
6857 #endif
6859 /* In the name of slightly smaller debug output, and to cater to
6860 general assembler lossage, recognize various UNSPEC sequences
6861 and turn them back into a direct symbol reference. */
6863 static rtx
6864 s390_delegitimize_address (rtx orig_x)
6866 rtx x, y;
6868 orig_x = delegitimize_mem_from_attrs (orig_x);
6869 x = orig_x;
6871 /* Extract the symbol ref from:
6872 (plus:SI (reg:SI 12 %r12)
6873 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6874 UNSPEC_GOTOFF/PLTOFF)))
6876 (plus:SI (reg:SI 12 %r12)
6877 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6878 UNSPEC_GOTOFF/PLTOFF)
6879 (const_int 4 [0x4])))) */
6880 if (GET_CODE (x) == PLUS
6881 && REG_P (XEXP (x, 0))
6882 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6883 && GET_CODE (XEXP (x, 1)) == CONST)
6885 HOST_WIDE_INT offset = 0;
6887 /* The const operand. */
6888 y = XEXP (XEXP (x, 1), 0);
6890 if (GET_CODE (y) == PLUS
6891 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6893 offset = INTVAL (XEXP (y, 1));
6894 y = XEXP (y, 0);
6897 if (GET_CODE (y) == UNSPEC
6898 && (XINT (y, 1) == UNSPEC_GOTOFF
6899 || XINT (y, 1) == UNSPEC_PLTOFF))
6900 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6903 if (GET_CODE (x) != MEM)
6904 return orig_x;
6906 x = XEXP (x, 0);
6907 if (GET_CODE (x) == PLUS
6908 && GET_CODE (XEXP (x, 1)) == CONST
6909 && GET_CODE (XEXP (x, 0)) == REG
6910 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6912 y = XEXP (XEXP (x, 1), 0);
6913 if (GET_CODE (y) == UNSPEC
6914 && XINT (y, 1) == UNSPEC_GOT)
6915 y = XVECEXP (y, 0, 0);
6916 else
6917 return orig_x;
6919 else if (GET_CODE (x) == CONST)
6921 /* Extract the symbol ref from:
6922 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6923 UNSPEC_PLT/GOTENT))) */
6925 y = XEXP (x, 0);
6926 if (GET_CODE (y) == UNSPEC
6927 && (XINT (y, 1) == UNSPEC_GOTENT
6928 || XINT (y, 1) == UNSPEC_PLT))
6929 y = XVECEXP (y, 0, 0);
6930 else
6931 return orig_x;
6933 else
6934 return orig_x;
6936 if (GET_MODE (orig_x) != Pmode)
6938 if (GET_MODE (orig_x) == BLKmode)
6939 return orig_x;
6940 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6941 if (y == NULL_RTX)
6942 return orig_x;
6944 return y;
6947 /* Output operand OP to stdio stream FILE.
6948 OP is an address (register + offset) which is not used to address data;
6949 instead the rightmost bits are interpreted as the value. */
6951 static void
6952 print_addrstyle_operand (FILE *file, rtx op)
6954 HOST_WIDE_INT offset;
6955 rtx base;
6957 /* Extract base register and offset. */
6958 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
6959 gcc_unreachable ();
6961 /* Sanity check. */
6962 if (base)
6964 gcc_assert (GET_CODE (base) == REG);
6965 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6966 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6969 /* Offsets are constricted to twelve bits. */
6970 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6971 if (base)
6972 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6975 /* Assigns the number of NOP halfwords to be emitted before and after the
6976 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6977 If hotpatching is disabled for the function, the values are set to zero.
6980 static void
6981 s390_function_num_hotpatch_hw (tree decl,
6982 int *hw_before,
6983 int *hw_after)
6985 tree attr;
6987 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6989 /* Handle the arguments of the hotpatch attribute. The values
6990 specified via attribute might override the cmdline argument
6991 values. */
6992 if (attr)
6994 tree args = TREE_VALUE (attr);
6996 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6997 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6999 else
7001 /* Use the values specified by the cmdline arguments. */
7002 *hw_before = s390_hotpatch_hw_before_label;
7003 *hw_after = s390_hotpatch_hw_after_label;
7007 /* Write the current .machine and .machinemode specification to the assembler
7008 file. */
7010 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7011 static void
7012 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7014 fprintf (asm_out_file, "\t.machinemode %s\n",
7015 (TARGET_ZARCH) ? "zarch" : "esa");
7016 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
7017 if (S390_USE_ARCHITECTURE_MODIFIERS)
7019 int cpu_flags;
7021 cpu_flags = processor_flags_table[(int) s390_arch];
7022 if (TARGET_HTM && !(cpu_flags & PF_TX))
7023 fprintf (asm_out_file, "+htm");
7024 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7025 fprintf (asm_out_file, "+nohtm");
7026 if (TARGET_VX && !(cpu_flags & PF_VX))
7027 fprintf (asm_out_file, "+vx");
7028 else if (!TARGET_VX && (cpu_flags & PF_VX))
7029 fprintf (asm_out_file, "+novx");
7031 fprintf (asm_out_file, "\"\n");
7034 /* Write an extra function header before the very start of the function. */
7036 void
7037 s390_asm_output_function_prefix (FILE *asm_out_file,
7038 const char *fnname ATTRIBUTE_UNUSED)
7040 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7041 return;
7042 /* Since only the function specific options are saved but not the indications
7043 which options are set, it's too much work here to figure out which options
7044 have actually changed. Thus, generate .machine and .machinemode whenever a
7045 function has the target attribute or pragma. */
7046 fprintf (asm_out_file, "\t.machinemode push\n");
7047 fprintf (asm_out_file, "\t.machine push\n");
7048 s390_asm_output_machine_for_arch (asm_out_file);
7051 /* Write an extra function footer after the very end of the function. */
7053 void
7054 s390_asm_declare_function_size (FILE *asm_out_file,
7055 const char *fnname, tree decl)
7057 if (!flag_inhibit_size_directive)
7058 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7059 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7060 return;
7061 fprintf (asm_out_file, "\t.machine pop\n");
7062 fprintf (asm_out_file, "\t.machinemode pop\n");
7064 #endif
7066 /* Write the extra assembler code needed to declare a function properly. */
7068 void
7069 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7070 tree decl)
7072 int hw_before, hw_after;
7074 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7075 if (hw_before > 0)
7077 unsigned int function_alignment;
7078 int i;
7080 /* Add a trampoline code area before the function label and initialize it
7081 with two-byte nop instructions. This area can be overwritten with code
7082 that jumps to a patched version of the function. */
7083 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
7084 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7085 hw_before);
7086 for (i = 1; i < hw_before; i++)
7087 fputs ("\tnopr\t%r7\n", asm_out_file);
7089 /* Note: The function label must be aligned so that (a) the bytes of the
7090 following nop do not cross a cacheline boundary, and (b) a jump address
7091 (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
7092 stored directly before the label without crossing a cacheline
7093 boundary. All this is necessary to make sure the trampoline code can
7094 be changed atomically.
7095 This alignment is done automatically using the FOUNCTION_BOUNDARY, but
7096 if there are NOPs before the function label, the alignment is placed
7097 before them. So it is necessary to duplicate the alignment after the
7098 NOPs. */
7099 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7100 if (! DECL_USER_ALIGN (decl))
7101 function_alignment = MAX (function_alignment,
7102 (unsigned int) align_functions);
7103 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7104 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7107 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7109 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7110 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7111 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7112 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7113 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7114 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7115 s390_warn_framesize);
7116 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7117 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7118 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7119 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7120 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7121 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7122 TARGET_PACKED_STACK);
7123 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7124 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7125 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7126 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7127 s390_warn_dynamicstack_p);
7129 ASM_OUTPUT_LABEL (asm_out_file, fname);
7130 if (hw_after > 0)
7131 asm_fprintf (asm_out_file,
7132 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7133 hw_after);
7136 /* Output machine-dependent UNSPECs occurring in address constant X
7137 in assembler syntax to stdio stream FILE. Returns true if the
7138 constant X could be recognized, false otherwise. */
7140 static bool
7141 s390_output_addr_const_extra (FILE *file, rtx x)
7143 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7144 switch (XINT (x, 1))
7146 case UNSPEC_GOTENT:
7147 output_addr_const (file, XVECEXP (x, 0, 0));
7148 fprintf (file, "@GOTENT");
7149 return true;
7150 case UNSPEC_GOT:
7151 output_addr_const (file, XVECEXP (x, 0, 0));
7152 fprintf (file, "@GOT");
7153 return true;
7154 case UNSPEC_GOTOFF:
7155 output_addr_const (file, XVECEXP (x, 0, 0));
7156 fprintf (file, "@GOTOFF");
7157 return true;
7158 case UNSPEC_PLT:
7159 output_addr_const (file, XVECEXP (x, 0, 0));
7160 fprintf (file, "@PLT");
7161 return true;
7162 case UNSPEC_PLTOFF:
7163 output_addr_const (file, XVECEXP (x, 0, 0));
7164 fprintf (file, "@PLTOFF");
7165 return true;
7166 case UNSPEC_TLSGD:
7167 output_addr_const (file, XVECEXP (x, 0, 0));
7168 fprintf (file, "@TLSGD");
7169 return true;
7170 case UNSPEC_TLSLDM:
7171 assemble_name (file, get_some_local_dynamic_name ());
7172 fprintf (file, "@TLSLDM");
7173 return true;
7174 case UNSPEC_DTPOFF:
7175 output_addr_const (file, XVECEXP (x, 0, 0));
7176 fprintf (file, "@DTPOFF");
7177 return true;
7178 case UNSPEC_NTPOFF:
7179 output_addr_const (file, XVECEXP (x, 0, 0));
7180 fprintf (file, "@NTPOFF");
7181 return true;
7182 case UNSPEC_GOTNTPOFF:
7183 output_addr_const (file, XVECEXP (x, 0, 0));
7184 fprintf (file, "@GOTNTPOFF");
7185 return true;
7186 case UNSPEC_INDNTPOFF:
7187 output_addr_const (file, XVECEXP (x, 0, 0));
7188 fprintf (file, "@INDNTPOFF");
7189 return true;
7192 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7193 switch (XINT (x, 1))
7195 case UNSPEC_POOL_OFFSET:
7196 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7197 output_addr_const (file, x);
7198 return true;
7200 return false;
7203 /* Output address operand ADDR in assembler syntax to
7204 stdio stream FILE. */
7206 void
7207 print_operand_address (FILE *file, rtx addr)
7209 struct s390_address ad;
7211 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7213 if (!TARGET_Z10)
7215 output_operand_lossage ("symbolic memory references are "
7216 "only supported on z10 or later");
7217 return;
7219 output_addr_const (file, addr);
7220 return;
7223 if (!s390_decompose_address (addr, &ad)
7224 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7225 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7226 output_operand_lossage ("cannot decompose address");
7228 if (ad.disp)
7229 output_addr_const (file, ad.disp);
7230 else
7231 fprintf (file, "0");
7233 if (ad.base && ad.indx)
7234 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7235 reg_names[REGNO (ad.base)]);
7236 else if (ad.base)
7237 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7240 /* Output operand X in assembler syntax to stdio stream FILE.
7241 CODE specified the format flag. The following format flags
7242 are recognized:
7244 'C': print opcode suffix for branch condition.
7245 'D': print opcode suffix for inverse branch condition.
7246 'E': print opcode suffix for branch on index instruction.
7247 'G': print the size of the operand in bytes.
7248 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7249 'M': print the second word of a TImode operand.
7250 'N': print the second word of a DImode operand.
7251 'O': print only the displacement of a memory reference or address.
7252 'R': print only the base register of a memory reference or address.
7253 'S': print S-type memory reference (base+displacement).
7254 'Y': print address style operand without index (e.g. shift count or setmem
7255 operand).
7257 'b': print integer X as if it's an unsigned byte.
7258 'c': print integer X as if it's an signed byte.
7259 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7260 'f': "end" contiguous bitmask X in SImode.
7261 'h': print integer X as if it's a signed halfword.
7262 'i': print the first nonzero HImode part of X.
7263 'j': print the first HImode part unequal to -1 of X.
7264 'k': print the first nonzero SImode part of X.
7265 'm': print the first SImode part unequal to -1 of X.
7266 'o': print integer X as if it's an unsigned 32bit word.
7267 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7268 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7269 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7270 'x': print integer X as if it's an unsigned halfword.
7271 'v': print register number as vector register (v1 instead of f1).
7274 void
7275 print_operand (FILE *file, rtx x, int code)
7277 HOST_WIDE_INT ival;
7279 switch (code)
7281 case 'C':
7282 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7283 return;
7285 case 'D':
7286 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7287 return;
7289 case 'E':
7290 if (GET_CODE (x) == LE)
7291 fprintf (file, "l");
7292 else if (GET_CODE (x) == GT)
7293 fprintf (file, "h");
7294 else
7295 output_operand_lossage ("invalid comparison operator "
7296 "for 'E' output modifier");
7297 return;
7299 case 'J':
7300 if (GET_CODE (x) == SYMBOL_REF)
7302 fprintf (file, "%s", ":tls_load:");
7303 output_addr_const (file, x);
7305 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7307 fprintf (file, "%s", ":tls_gdcall:");
7308 output_addr_const (file, XVECEXP (x, 0, 0));
7310 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7312 fprintf (file, "%s", ":tls_ldcall:");
7313 const char *name = get_some_local_dynamic_name ();
7314 gcc_assert (name);
7315 assemble_name (file, name);
7317 else
7318 output_operand_lossage ("invalid reference for 'J' output modifier");
7319 return;
7321 case 'G':
7322 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7323 return;
7325 case 'O':
7327 struct s390_address ad;
7328 int ret;
7330 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7332 if (!ret
7333 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7334 || ad.indx)
7336 output_operand_lossage ("invalid address for 'O' output modifier");
7337 return;
7340 if (ad.disp)
7341 output_addr_const (file, ad.disp);
7342 else
7343 fprintf (file, "0");
7345 return;
7347 case 'R':
7349 struct s390_address ad;
7350 int ret;
7352 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7354 if (!ret
7355 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7356 || ad.indx)
7358 output_operand_lossage ("invalid address for 'R' output modifier");
7359 return;
7362 if (ad.base)
7363 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7364 else
7365 fprintf (file, "0");
7367 return;
7369 case 'S':
7371 struct s390_address ad;
7372 int ret;
7374 if (!MEM_P (x))
7376 output_operand_lossage ("memory reference expected for "
7377 "'S' output modifier");
7378 return;
7380 ret = s390_decompose_address (XEXP (x, 0), &ad);
7382 if (!ret
7383 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7384 || ad.indx)
7386 output_operand_lossage ("invalid address for 'S' output modifier");
7387 return;
7390 if (ad.disp)
7391 output_addr_const (file, ad.disp);
7392 else
7393 fprintf (file, "0");
7395 if (ad.base)
7396 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7398 return;
7400 case 'N':
7401 if (GET_CODE (x) == REG)
7402 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7403 else if (GET_CODE (x) == MEM)
7404 x = change_address (x, VOIDmode,
7405 plus_constant (Pmode, XEXP (x, 0), 4));
7406 else
7407 output_operand_lossage ("register or memory expression expected "
7408 "for 'N' output modifier");
7409 break;
7411 case 'M':
7412 if (GET_CODE (x) == REG)
7413 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7414 else if (GET_CODE (x) == MEM)
7415 x = change_address (x, VOIDmode,
7416 plus_constant (Pmode, XEXP (x, 0), 8));
7417 else
7418 output_operand_lossage ("register or memory expression expected "
7419 "for 'M' output modifier");
7420 break;
7422 case 'Y':
7423 print_addrstyle_operand (file, x);
7424 return;
7427 switch (GET_CODE (x))
7429 case REG:
7430 /* Print FP regs as fx instead of vx when they are accessed
7431 through non-vector mode. */
7432 if (code == 'v'
7433 || VECTOR_NOFP_REG_P (x)
7434 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7435 || (VECTOR_REG_P (x)
7436 && (GET_MODE_SIZE (GET_MODE (x)) /
7437 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7438 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7439 else
7440 fprintf (file, "%s", reg_names[REGNO (x)]);
7441 break;
7443 case MEM:
7444 output_address (GET_MODE (x), XEXP (x, 0));
7445 break;
7447 case CONST:
7448 case CODE_LABEL:
7449 case LABEL_REF:
7450 case SYMBOL_REF:
7451 output_addr_const (file, x);
7452 break;
7454 case CONST_INT:
7455 ival = INTVAL (x);
7456 switch (code)
7458 case 0:
7459 break;
7460 case 'b':
7461 ival &= 0xff;
7462 break;
7463 case 'c':
7464 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7465 break;
7466 case 'x':
7467 ival &= 0xffff;
7468 break;
7469 case 'h':
7470 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7471 break;
7472 case 'i':
7473 ival = s390_extract_part (x, HImode, 0);
7474 break;
7475 case 'j':
7476 ival = s390_extract_part (x, HImode, -1);
7477 break;
7478 case 'k':
7479 ival = s390_extract_part (x, SImode, 0);
7480 break;
7481 case 'm':
7482 ival = s390_extract_part (x, SImode, -1);
7483 break;
7484 case 'o':
7485 ival &= 0xffffffff;
7486 break;
7487 case 'e': case 'f':
7488 case 's': case 't':
7490 int start, end;
7491 int len;
7492 bool ok;
7494 len = (code == 's' || code == 'e' ? 64 : 32);
7495 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7496 gcc_assert (ok);
7497 if (code == 's' || code == 't')
7498 ival = start;
7499 else
7500 ival = end;
7502 break;
7503 default:
7504 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7506 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7507 break;
7509 case CONST_WIDE_INT:
7510 if (code == 'b')
7511 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7512 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7513 else if (code == 'x')
7514 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7515 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7516 else if (code == 'h')
7517 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7518 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7519 else
7521 if (code == 0)
7522 output_operand_lossage ("invalid constant - try using "
7523 "an output modifier");
7524 else
7525 output_operand_lossage ("invalid constant for output modifier '%c'",
7526 code);
7528 break;
7529 case CONST_VECTOR:
7530 switch (code)
7532 case 'h':
7533 gcc_assert (const_vec_duplicate_p (x));
7534 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7535 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7536 break;
7537 case 'e':
7538 case 's':
7540 int start, end;
7541 bool ok;
7543 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7544 gcc_assert (ok);
7545 ival = (code == 's') ? start : end;
7546 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7548 break;
7549 case 't':
7551 unsigned mask;
7552 bool ok = s390_bytemask_vector_p (x, &mask);
7553 gcc_assert (ok);
7554 fprintf (file, "%u", mask);
7556 break;
7558 default:
7559 output_operand_lossage ("invalid constant vector for output "
7560 "modifier '%c'", code);
7562 break;
7564 default:
7565 if (code == 0)
7566 output_operand_lossage ("invalid expression - try using "
7567 "an output modifier");
7568 else
7569 output_operand_lossage ("invalid expression for output "
7570 "modifier '%c'", code);
7571 break;
7575 /* Target hook for assembling integer objects. We need to define it
7576 here to work a round a bug in some versions of GAS, which couldn't
7577 handle values smaller than INT_MIN when printed in decimal. */
7579 static bool
7580 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7582 if (size == 8 && aligned_p
7583 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7585 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7586 INTVAL (x));
7587 return true;
7589 return default_assemble_integer (x, size, aligned_p);
7592 /* Returns true if register REGNO is used for forming
7593 a memory address in expression X. */
7595 static bool
7596 reg_used_in_mem_p (int regno, rtx x)
7598 enum rtx_code code = GET_CODE (x);
7599 int i, j;
7600 const char *fmt;
7602 if (code == MEM)
7604 if (refers_to_regno_p (regno, XEXP (x, 0)))
7605 return true;
7607 else if (code == SET
7608 && GET_CODE (SET_DEST (x)) == PC)
7610 if (refers_to_regno_p (regno, SET_SRC (x)))
7611 return true;
7614 fmt = GET_RTX_FORMAT (code);
7615 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7617 if (fmt[i] == 'e'
7618 && reg_used_in_mem_p (regno, XEXP (x, i)))
7619 return true;
7621 else if (fmt[i] == 'E')
7622 for (j = 0; j < XVECLEN (x, i); j++)
7623 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7624 return true;
7626 return false;
7629 /* Returns true if expression DEP_RTX sets an address register
7630 used by instruction INSN to address memory. */
7632 static bool
7633 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7635 rtx target, pat;
7637 if (NONJUMP_INSN_P (dep_rtx))
7638 dep_rtx = PATTERN (dep_rtx);
7640 if (GET_CODE (dep_rtx) == SET)
7642 target = SET_DEST (dep_rtx);
7643 if (GET_CODE (target) == STRICT_LOW_PART)
7644 target = XEXP (target, 0);
7645 while (GET_CODE (target) == SUBREG)
7646 target = SUBREG_REG (target);
7648 if (GET_CODE (target) == REG)
7650 int regno = REGNO (target);
7652 if (s390_safe_attr_type (insn) == TYPE_LA)
7654 pat = PATTERN (insn);
7655 if (GET_CODE (pat) == PARALLEL)
7657 gcc_assert (XVECLEN (pat, 0) == 2);
7658 pat = XVECEXP (pat, 0, 0);
7660 gcc_assert (GET_CODE (pat) == SET);
7661 return refers_to_regno_p (regno, SET_SRC (pat));
7663 else if (get_attr_atype (insn) == ATYPE_AGEN)
7664 return reg_used_in_mem_p (regno, PATTERN (insn));
7667 return false;
7670 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
7673 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7675 rtx dep_rtx = PATTERN (dep_insn);
7676 int i;
7678 if (GET_CODE (dep_rtx) == SET
7679 && addr_generation_dependency_p (dep_rtx, insn))
7680 return 1;
7681 else if (GET_CODE (dep_rtx) == PARALLEL)
7683 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7685 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7686 return 1;
7689 return 0;
7693 /* A C statement (sans semicolon) to update the integer scheduling priority
7694 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7695 reduce the priority to execute INSN later. Do not define this macro if
7696 you do not need to adjust the scheduling priorities of insns.
7698 A STD instruction should be scheduled earlier,
7699 in order to use the bypass. */
7700 static int
7701 s390_adjust_priority (rtx_insn *insn, int priority)
7703 if (! INSN_P (insn))
7704 return priority;
7706 if (s390_tune <= PROCESSOR_2064_Z900)
7707 return priority;
7709 switch (s390_safe_attr_type (insn))
7711 case TYPE_FSTOREDF:
7712 case TYPE_FSTORESF:
7713 priority = priority << 3;
7714 break;
7715 case TYPE_STORE:
7716 case TYPE_STM:
7717 priority = priority << 1;
7718 break;
7719 default:
7720 break;
7722 return priority;
7726 /* The number of instructions that can be issued per cycle. */
7728 static int
7729 s390_issue_rate (void)
7731 switch (s390_tune)
7733 case PROCESSOR_2084_Z990:
7734 case PROCESSOR_2094_Z9_109:
7735 case PROCESSOR_2094_Z9_EC:
7736 case PROCESSOR_2817_Z196:
7737 return 3;
7738 case PROCESSOR_2097_Z10:
7739 return 2;
7740 case PROCESSOR_9672_G5:
7741 case PROCESSOR_9672_G6:
7742 case PROCESSOR_2064_Z900:
7743 /* Starting with EC12 we use the sched_reorder hook to take care
7744 of instruction dispatch constraints. The algorithm only
7745 picks the best instruction and assumes only a single
7746 instruction gets issued per cycle. */
7747 case PROCESSOR_2827_ZEC12:
7748 case PROCESSOR_2964_Z13:
7749 default:
7750 return 1;
7754 static int
7755 s390_first_cycle_multipass_dfa_lookahead (void)
7757 return 4;
7760 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7761 Fix up MEMs as required. */
7763 static void
7764 annotate_constant_pool_refs (rtx *x)
7766 int i, j;
7767 const char *fmt;
7769 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7770 || !CONSTANT_POOL_ADDRESS_P (*x));
7772 /* Literal pool references can only occur inside a MEM ... */
7773 if (GET_CODE (*x) == MEM)
7775 rtx memref = XEXP (*x, 0);
7777 if (GET_CODE (memref) == SYMBOL_REF
7778 && CONSTANT_POOL_ADDRESS_P (memref))
7780 rtx base = cfun->machine->base_reg;
7781 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7782 UNSPEC_LTREF);
7784 *x = replace_equiv_address (*x, addr);
7785 return;
7788 if (GET_CODE (memref) == CONST
7789 && GET_CODE (XEXP (memref, 0)) == PLUS
7790 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7791 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7792 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7794 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7795 rtx sym = XEXP (XEXP (memref, 0), 0);
7796 rtx base = cfun->machine->base_reg;
7797 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7798 UNSPEC_LTREF);
7800 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7801 return;
7805 /* ... or a load-address type pattern. */
7806 if (GET_CODE (*x) == SET)
7808 rtx addrref = SET_SRC (*x);
7810 if (GET_CODE (addrref) == SYMBOL_REF
7811 && CONSTANT_POOL_ADDRESS_P (addrref))
7813 rtx base = cfun->machine->base_reg;
7814 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7815 UNSPEC_LTREF);
7817 SET_SRC (*x) = addr;
7818 return;
7821 if (GET_CODE (addrref) == CONST
7822 && GET_CODE (XEXP (addrref, 0)) == PLUS
7823 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7824 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7825 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7827 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7828 rtx sym = XEXP (XEXP (addrref, 0), 0);
7829 rtx base = cfun->machine->base_reg;
7830 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7831 UNSPEC_LTREF);
7833 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7834 return;
7838 /* Annotate LTREL_BASE as well. */
7839 if (GET_CODE (*x) == UNSPEC
7840 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7842 rtx base = cfun->machine->base_reg;
7843 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7844 UNSPEC_LTREL_BASE);
7845 return;
7848 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7849 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7851 if (fmt[i] == 'e')
7853 annotate_constant_pool_refs (&XEXP (*x, i));
7855 else if (fmt[i] == 'E')
7857 for (j = 0; j < XVECLEN (*x, i); j++)
7858 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7863 /* Split all branches that exceed the maximum distance.
7864 Returns true if this created a new literal pool entry. */
7866 static int
7867 s390_split_branches (void)
7869 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7870 int new_literal = 0, ret;
7871 rtx_insn *insn;
7872 rtx pat, target;
7873 rtx *label;
7875 /* We need correct insn addresses. */
7877 shorten_branches (get_insns ());
7879 /* Find all branches that exceed 64KB, and split them. */
7881 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7883 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7884 continue;
7886 pat = PATTERN (insn);
7887 if (GET_CODE (pat) == PARALLEL)
7888 pat = XVECEXP (pat, 0, 0);
7889 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7890 continue;
7892 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7894 label = &SET_SRC (pat);
7896 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7898 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7899 label = &XEXP (SET_SRC (pat), 1);
7900 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7901 label = &XEXP (SET_SRC (pat), 2);
7902 else
7903 continue;
7905 else
7906 continue;
7908 if (get_attr_length (insn) <= 4)
7909 continue;
7911 /* We are going to use the return register as scratch register,
7912 make sure it will be saved/restored by the prologue/epilogue. */
7913 cfun_frame_layout.save_return_addr_p = 1;
7915 if (!flag_pic)
7917 new_literal = 1;
7918 rtx mem = force_const_mem (Pmode, *label);
7919 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7920 insn);
7921 INSN_ADDRESSES_NEW (set_insn, -1);
7922 annotate_constant_pool_refs (&PATTERN (set_insn));
7924 target = temp_reg;
7926 else
7928 new_literal = 1;
7929 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7930 UNSPEC_LTREL_OFFSET);
7931 target = gen_rtx_CONST (Pmode, target);
7932 target = force_const_mem (Pmode, target);
7933 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7934 insn);
7935 INSN_ADDRESSES_NEW (set_insn, -1);
7936 annotate_constant_pool_refs (&PATTERN (set_insn));
7938 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7939 cfun->machine->base_reg),
7940 UNSPEC_LTREL_BASE);
7941 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7944 ret = validate_change (insn, label, target, 0);
7945 gcc_assert (ret);
7948 return new_literal;
7952 /* Find an annotated literal pool symbol referenced in RTX X,
7953 and store it at REF. Will abort if X contains references to
7954 more than one such pool symbol; multiple references to the same
7955 symbol are allowed, however.
7957 The rtx pointed to by REF must be initialized to NULL_RTX
7958 by the caller before calling this routine. */
7960 static void
7961 find_constant_pool_ref (rtx x, rtx *ref)
7963 int i, j;
7964 const char *fmt;
7966 /* Ignore LTREL_BASE references. */
7967 if (GET_CODE (x) == UNSPEC
7968 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7969 return;
7970 /* Likewise POOL_ENTRY insns. */
7971 if (GET_CODE (x) == UNSPEC_VOLATILE
7972 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7973 return;
7975 gcc_assert (GET_CODE (x) != SYMBOL_REF
7976 || !CONSTANT_POOL_ADDRESS_P (x));
7978 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7980 rtx sym = XVECEXP (x, 0, 0);
7981 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7982 && CONSTANT_POOL_ADDRESS_P (sym));
7984 if (*ref == NULL_RTX)
7985 *ref = sym;
7986 else
7987 gcc_assert (*ref == sym);
7989 return;
7992 fmt = GET_RTX_FORMAT (GET_CODE (x));
7993 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7995 if (fmt[i] == 'e')
7997 find_constant_pool_ref (XEXP (x, i), ref);
7999 else if (fmt[i] == 'E')
8001 for (j = 0; j < XVECLEN (x, i); j++)
8002 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8007 /* Replace every reference to the annotated literal pool
8008 symbol REF in X by its base plus OFFSET. */
8010 static void
8011 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8013 int i, j;
8014 const char *fmt;
8016 gcc_assert (*x != ref);
8018 if (GET_CODE (*x) == UNSPEC
8019 && XINT (*x, 1) == UNSPEC_LTREF
8020 && XVECEXP (*x, 0, 0) == ref)
8022 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8023 return;
8026 if (GET_CODE (*x) == PLUS
8027 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8028 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8029 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8030 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8032 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8033 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8034 return;
8037 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8038 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8040 if (fmt[i] == 'e')
8042 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8044 else if (fmt[i] == 'E')
8046 for (j = 0; j < XVECLEN (*x, i); j++)
8047 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8052 /* Check whether X contains an UNSPEC_LTREL_BASE.
8053 Return its constant pool symbol if found, NULL_RTX otherwise. */
8055 static rtx
8056 find_ltrel_base (rtx x)
8058 int i, j;
8059 const char *fmt;
8061 if (GET_CODE (x) == UNSPEC
8062 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8063 return XVECEXP (x, 0, 0);
8065 fmt = GET_RTX_FORMAT (GET_CODE (x));
8066 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8068 if (fmt[i] == 'e')
8070 rtx fnd = find_ltrel_base (XEXP (x, i));
8071 if (fnd)
8072 return fnd;
8074 else if (fmt[i] == 'E')
8076 for (j = 0; j < XVECLEN (x, i); j++)
8078 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8079 if (fnd)
8080 return fnd;
8085 return NULL_RTX;
8088 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8090 static void
8091 replace_ltrel_base (rtx *x)
8093 int i, j;
8094 const char *fmt;
8096 if (GET_CODE (*x) == UNSPEC
8097 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8099 *x = XVECEXP (*x, 0, 1);
8100 return;
8103 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8104 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8106 if (fmt[i] == 'e')
8108 replace_ltrel_base (&XEXP (*x, i));
8110 else if (fmt[i] == 'E')
8112 for (j = 0; j < XVECLEN (*x, i); j++)
8113 replace_ltrel_base (&XVECEXP (*x, i, j));
8119 /* We keep a list of constants which we have to add to internal
8120 constant tables in the middle of large functions. */
8122 #define NR_C_MODES 32
8123 machine_mode constant_modes[NR_C_MODES] =
8125 TFmode, TImode, TDmode,
8126 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8127 V4SFmode, V2DFmode, V1TFmode,
8128 DFmode, DImode, DDmode,
8129 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8130 SFmode, SImode, SDmode,
8131 V4QImode, V2HImode, V1SImode, V1SFmode,
8132 HImode,
8133 V2QImode, V1HImode,
8134 QImode,
8135 V1QImode
8138 struct constant
8140 struct constant *next;
8141 rtx value;
8142 rtx_code_label *label;
8145 struct constant_pool
8147 struct constant_pool *next;
8148 rtx_insn *first_insn;
8149 rtx_insn *pool_insn;
8150 bitmap insns;
8151 rtx_insn *emit_pool_after;
8153 struct constant *constants[NR_C_MODES];
8154 struct constant *execute;
8155 rtx_code_label *label;
8156 int size;
8159 /* Allocate new constant_pool structure. */
8161 static struct constant_pool *
8162 s390_alloc_pool (void)
8164 struct constant_pool *pool;
8165 int i;
8167 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8168 pool->next = NULL;
8169 for (i = 0; i < NR_C_MODES; i++)
8170 pool->constants[i] = NULL;
8172 pool->execute = NULL;
8173 pool->label = gen_label_rtx ();
8174 pool->first_insn = NULL;
8175 pool->pool_insn = NULL;
8176 pool->insns = BITMAP_ALLOC (NULL);
8177 pool->size = 0;
8178 pool->emit_pool_after = NULL;
8180 return pool;
8183 /* Create new constant pool covering instructions starting at INSN
8184 and chain it to the end of POOL_LIST. */
8186 static struct constant_pool *
8187 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8189 struct constant_pool *pool, **prev;
8191 pool = s390_alloc_pool ();
8192 pool->first_insn = insn;
8194 for (prev = pool_list; *prev; prev = &(*prev)->next)
8196 *prev = pool;
8198 return pool;
8201 /* End range of instructions covered by POOL at INSN and emit
8202 placeholder insn representing the pool. */
8204 static void
8205 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8207 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8209 if (!insn)
8210 insn = get_last_insn ();
8212 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8213 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8216 /* Add INSN to the list of insns covered by POOL. */
8218 static void
8219 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8221 bitmap_set_bit (pool->insns, INSN_UID (insn));
8224 /* Return pool out of POOL_LIST that covers INSN. */
8226 static struct constant_pool *
8227 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8229 struct constant_pool *pool;
8231 for (pool = pool_list; pool; pool = pool->next)
8232 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8233 break;
8235 return pool;
8238 /* Add constant VAL of mode MODE to the constant pool POOL. */
8240 static void
8241 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8243 struct constant *c;
8244 int i;
8246 for (i = 0; i < NR_C_MODES; i++)
8247 if (constant_modes[i] == mode)
8248 break;
8249 gcc_assert (i != NR_C_MODES);
8251 for (c = pool->constants[i]; c != NULL; c = c->next)
8252 if (rtx_equal_p (val, c->value))
8253 break;
8255 if (c == NULL)
8257 c = (struct constant *) xmalloc (sizeof *c);
8258 c->value = val;
8259 c->label = gen_label_rtx ();
8260 c->next = pool->constants[i];
8261 pool->constants[i] = c;
8262 pool->size += GET_MODE_SIZE (mode);
8266 /* Return an rtx that represents the offset of X from the start of
8267 pool POOL. */
8269 static rtx
8270 s390_pool_offset (struct constant_pool *pool, rtx x)
8272 rtx label;
8274 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8275 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8276 UNSPEC_POOL_OFFSET);
8277 return gen_rtx_CONST (GET_MODE (x), x);
8280 /* Find constant VAL of mode MODE in the constant pool POOL.
8281 Return an RTX describing the distance from the start of
8282 the pool to the location of the new constant. */
8284 static rtx
8285 s390_find_constant (struct constant_pool *pool, rtx val,
8286 machine_mode mode)
8288 struct constant *c;
8289 int i;
8291 for (i = 0; i < NR_C_MODES; i++)
8292 if (constant_modes[i] == mode)
8293 break;
8294 gcc_assert (i != NR_C_MODES);
8296 for (c = pool->constants[i]; c != NULL; c = c->next)
8297 if (rtx_equal_p (val, c->value))
8298 break;
8300 gcc_assert (c);
8302 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8305 /* Check whether INSN is an execute. Return the label_ref to its
8306 execute target template if so, NULL_RTX otherwise. */
8308 static rtx
8309 s390_execute_label (rtx insn)
8311 if (NONJUMP_INSN_P (insn)
8312 && GET_CODE (PATTERN (insn)) == PARALLEL
8313 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8314 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8315 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8317 return NULL_RTX;
8320 /* Add execute target for INSN to the constant pool POOL. */
8322 static void
8323 s390_add_execute (struct constant_pool *pool, rtx insn)
8325 struct constant *c;
8327 for (c = pool->execute; c != NULL; c = c->next)
8328 if (INSN_UID (insn) == INSN_UID (c->value))
8329 break;
8331 if (c == NULL)
8333 c = (struct constant *) xmalloc (sizeof *c);
8334 c->value = insn;
8335 c->label = gen_label_rtx ();
8336 c->next = pool->execute;
8337 pool->execute = c;
8338 pool->size += 6;
8342 /* Find execute target for INSN in the constant pool POOL.
8343 Return an RTX describing the distance from the start of
8344 the pool to the location of the execute target. */
8346 static rtx
8347 s390_find_execute (struct constant_pool *pool, rtx insn)
8349 struct constant *c;
8351 for (c = pool->execute; c != NULL; c = c->next)
8352 if (INSN_UID (insn) == INSN_UID (c->value))
8353 break;
8355 gcc_assert (c);
8357 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8360 /* For an execute INSN, extract the execute target template. */
8362 static rtx
8363 s390_execute_target (rtx insn)
8365 rtx pattern = PATTERN (insn);
8366 gcc_assert (s390_execute_label (insn));
8368 if (XVECLEN (pattern, 0) == 2)
8370 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8372 else
8374 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8375 int i;
8377 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8378 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8380 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8383 return pattern;
8386 /* Indicate that INSN cannot be duplicated. This is the case for
8387 execute insns that carry a unique label. */
8389 static bool
8390 s390_cannot_copy_insn_p (rtx_insn *insn)
8392 rtx label = s390_execute_label (insn);
8393 return label && label != const0_rtx;
8396 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8397 do not emit the pool base label. */
8399 static void
8400 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8402 struct constant *c;
8403 rtx_insn *insn = pool->pool_insn;
8404 int i;
8406 /* Switch to rodata section. */
8407 if (TARGET_CPU_ZARCH)
8409 insn = emit_insn_after (gen_pool_section_start (), insn);
8410 INSN_ADDRESSES_NEW (insn, -1);
8413 /* Ensure minimum pool alignment. */
8414 if (TARGET_CPU_ZARCH)
8415 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8416 else
8417 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8418 INSN_ADDRESSES_NEW (insn, -1);
8420 /* Emit pool base label. */
8421 if (!remote_label)
8423 insn = emit_label_after (pool->label, insn);
8424 INSN_ADDRESSES_NEW (insn, -1);
8427 /* Dump constants in descending alignment requirement order,
8428 ensuring proper alignment for every constant. */
8429 for (i = 0; i < NR_C_MODES; i++)
8430 for (c = pool->constants[i]; c; c = c->next)
8432 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8433 rtx value = copy_rtx (c->value);
8434 if (GET_CODE (value) == CONST
8435 && GET_CODE (XEXP (value, 0)) == UNSPEC
8436 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8437 && XVECLEN (XEXP (value, 0), 0) == 1)
8438 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8440 insn = emit_label_after (c->label, insn);
8441 INSN_ADDRESSES_NEW (insn, -1);
8443 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8444 gen_rtvec (1, value),
8445 UNSPECV_POOL_ENTRY);
8446 insn = emit_insn_after (value, insn);
8447 INSN_ADDRESSES_NEW (insn, -1);
8450 /* Ensure minimum alignment for instructions. */
8451 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8452 INSN_ADDRESSES_NEW (insn, -1);
8454 /* Output in-pool execute template insns. */
8455 for (c = pool->execute; c; c = c->next)
8457 insn = emit_label_after (c->label, insn);
8458 INSN_ADDRESSES_NEW (insn, -1);
8460 insn = emit_insn_after (s390_execute_target (c->value), insn);
8461 INSN_ADDRESSES_NEW (insn, -1);
8464 /* Switch back to previous section. */
8465 if (TARGET_CPU_ZARCH)
8467 insn = emit_insn_after (gen_pool_section_end (), insn);
8468 INSN_ADDRESSES_NEW (insn, -1);
8471 insn = emit_barrier_after (insn);
8472 INSN_ADDRESSES_NEW (insn, -1);
8474 /* Remove placeholder insn. */
8475 remove_insn (pool->pool_insn);
8478 /* Free all memory used by POOL. */
8480 static void
8481 s390_free_pool (struct constant_pool *pool)
8483 struct constant *c, *next;
8484 int i;
8486 for (i = 0; i < NR_C_MODES; i++)
8487 for (c = pool->constants[i]; c; c = next)
8489 next = c->next;
8490 free (c);
8493 for (c = pool->execute; c; c = next)
8495 next = c->next;
8496 free (c);
8499 BITMAP_FREE (pool->insns);
8500 free (pool);
8504 /* Collect main literal pool. Return NULL on overflow. */
8506 static struct constant_pool *
8507 s390_mainpool_start (void)
8509 struct constant_pool *pool;
8510 rtx_insn *insn;
8512 pool = s390_alloc_pool ();
8514 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8516 if (NONJUMP_INSN_P (insn)
8517 && GET_CODE (PATTERN (insn)) == SET
8518 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8519 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8521 /* There might be two main_pool instructions if base_reg
8522 is call-clobbered; one for shrink-wrapped code and one
8523 for the rest. We want to keep the first. */
8524 if (pool->pool_insn)
8526 insn = PREV_INSN (insn);
8527 delete_insn (NEXT_INSN (insn));
8528 continue;
8530 pool->pool_insn = insn;
8533 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8535 s390_add_execute (pool, insn);
8537 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8539 rtx pool_ref = NULL_RTX;
8540 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8541 if (pool_ref)
8543 rtx constant = get_pool_constant (pool_ref);
8544 machine_mode mode = get_pool_mode (pool_ref);
8545 s390_add_constant (pool, constant, mode);
8549 /* If hot/cold partitioning is enabled we have to make sure that
8550 the literal pool is emitted in the same section where the
8551 initialization of the literal pool base pointer takes place.
8552 emit_pool_after is only used in the non-overflow case on non
8553 Z cpus where we can emit the literal pool at the end of the
8554 function body within the text section. */
8555 if (NOTE_P (insn)
8556 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8557 && !pool->emit_pool_after)
8558 pool->emit_pool_after = PREV_INSN (insn);
8561 gcc_assert (pool->pool_insn || pool->size == 0);
8563 if (pool->size >= 4096)
8565 /* We're going to chunkify the pool, so remove the main
8566 pool placeholder insn. */
8567 remove_insn (pool->pool_insn);
8569 s390_free_pool (pool);
8570 pool = NULL;
8573 /* If the functions ends with the section where the literal pool
8574 should be emitted set the marker to its end. */
8575 if (pool && !pool->emit_pool_after)
8576 pool->emit_pool_after = get_last_insn ();
8578 return pool;
8581 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8582 Modify the current function to output the pool constants as well as
8583 the pool register setup instruction. */
8585 static void
8586 s390_mainpool_finish (struct constant_pool *pool)
8588 rtx base_reg = cfun->machine->base_reg;
8590 /* If the pool is empty, we're done. */
8591 if (pool->size == 0)
8593 /* We don't actually need a base register after all. */
8594 cfun->machine->base_reg = NULL_RTX;
8596 if (pool->pool_insn)
8597 remove_insn (pool->pool_insn);
8598 s390_free_pool (pool);
8599 return;
8602 /* We need correct insn addresses. */
8603 shorten_branches (get_insns ());
8605 /* On zSeries, we use a LARL to load the pool register. The pool is
8606 located in the .rodata section, so we emit it after the function. */
8607 if (TARGET_CPU_ZARCH)
8609 rtx set = gen_main_base_64 (base_reg, pool->label);
8610 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8611 INSN_ADDRESSES_NEW (insn, -1);
8612 remove_insn (pool->pool_insn);
8614 insn = get_last_insn ();
8615 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8616 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8618 s390_dump_pool (pool, 0);
8621 /* On S/390, if the total size of the function's code plus literal pool
8622 does not exceed 4096 bytes, we use BASR to set up a function base
8623 pointer, and emit the literal pool at the end of the function. */
8624 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8625 + pool->size + 8 /* alignment slop */ < 4096)
8627 rtx set = gen_main_base_31_small (base_reg, pool->label);
8628 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8629 INSN_ADDRESSES_NEW (insn, -1);
8630 remove_insn (pool->pool_insn);
8632 insn = emit_label_after (pool->label, insn);
8633 INSN_ADDRESSES_NEW (insn, -1);
8635 /* emit_pool_after will be set by s390_mainpool_start to the
8636 last insn of the section where the literal pool should be
8637 emitted. */
8638 insn = pool->emit_pool_after;
8640 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8641 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8643 s390_dump_pool (pool, 1);
8646 /* Otherwise, we emit an inline literal pool and use BASR to branch
8647 over it, setting up the pool register at the same time. */
8648 else
8650 rtx_code_label *pool_end = gen_label_rtx ();
8652 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8653 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8654 JUMP_LABEL (insn) = pool_end;
8655 INSN_ADDRESSES_NEW (insn, -1);
8656 remove_insn (pool->pool_insn);
8658 insn = emit_label_after (pool->label, insn);
8659 INSN_ADDRESSES_NEW (insn, -1);
8661 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8662 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8664 insn = emit_label_after (pool_end, pool->pool_insn);
8665 INSN_ADDRESSES_NEW (insn, -1);
8667 s390_dump_pool (pool, 1);
8671 /* Replace all literal pool references. */
8673 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8675 if (INSN_P (insn))
8676 replace_ltrel_base (&PATTERN (insn));
8678 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8680 rtx addr, pool_ref = NULL_RTX;
8681 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8682 if (pool_ref)
8684 if (s390_execute_label (insn))
8685 addr = s390_find_execute (pool, insn);
8686 else
8687 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8688 get_pool_mode (pool_ref));
8690 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8691 INSN_CODE (insn) = -1;
8697 /* Free the pool. */
8698 s390_free_pool (pool);
8701 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8702 We have decided we cannot use this pool, so revert all changes
8703 to the current function that were done by s390_mainpool_start. */
8704 static void
8705 s390_mainpool_cancel (struct constant_pool *pool)
8707 /* We didn't actually change the instruction stream, so simply
8708 free the pool memory. */
8709 s390_free_pool (pool);
8713 /* Chunkify the literal pool. */
8715 #define S390_POOL_CHUNK_MIN 0xc00
8716 #define S390_POOL_CHUNK_MAX 0xe00
8718 static struct constant_pool *
8719 s390_chunkify_start (void)
8721 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8722 int extra_size = 0;
8723 bitmap far_labels;
8724 rtx pending_ltrel = NULL_RTX;
8725 rtx_insn *insn;
8727 rtx (*gen_reload_base) (rtx, rtx) =
8728 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8731 /* We need correct insn addresses. */
8733 shorten_branches (get_insns ());
8735 /* Scan all insns and move literals to pool chunks. */
8737 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8739 bool section_switch_p = false;
8741 /* Check for pending LTREL_BASE. */
8742 if (INSN_P (insn))
8744 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8745 if (ltrel_base)
8747 gcc_assert (ltrel_base == pending_ltrel);
8748 pending_ltrel = NULL_RTX;
8752 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8754 if (!curr_pool)
8755 curr_pool = s390_start_pool (&pool_list, insn);
8757 s390_add_execute (curr_pool, insn);
8758 s390_add_pool_insn (curr_pool, insn);
8760 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8762 rtx pool_ref = NULL_RTX;
8763 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8764 if (pool_ref)
8766 rtx constant = get_pool_constant (pool_ref);
8767 machine_mode mode = get_pool_mode (pool_ref);
8769 if (!curr_pool)
8770 curr_pool = s390_start_pool (&pool_list, insn);
8772 s390_add_constant (curr_pool, constant, mode);
8773 s390_add_pool_insn (curr_pool, insn);
8775 /* Don't split the pool chunk between a LTREL_OFFSET load
8776 and the corresponding LTREL_BASE. */
8777 if (GET_CODE (constant) == CONST
8778 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8779 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8781 gcc_assert (!pending_ltrel);
8782 pending_ltrel = pool_ref;
8787 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8789 if (curr_pool)
8790 s390_add_pool_insn (curr_pool, insn);
8791 /* An LTREL_BASE must follow within the same basic block. */
8792 gcc_assert (!pending_ltrel);
8795 if (NOTE_P (insn))
8796 switch (NOTE_KIND (insn))
8798 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8799 section_switch_p = true;
8800 break;
8801 case NOTE_INSN_VAR_LOCATION:
8802 case NOTE_INSN_CALL_ARG_LOCATION:
8803 continue;
8804 default:
8805 break;
8808 if (!curr_pool
8809 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8810 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8811 continue;
8813 if (TARGET_CPU_ZARCH)
8815 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8816 continue;
8818 s390_end_pool (curr_pool, NULL);
8819 curr_pool = NULL;
8821 else
8823 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8824 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8825 + extra_size;
8827 /* We will later have to insert base register reload insns.
8828 Those will have an effect on code size, which we need to
8829 consider here. This calculation makes rather pessimistic
8830 worst-case assumptions. */
8831 if (LABEL_P (insn))
8832 extra_size += 6;
8834 if (chunk_size < S390_POOL_CHUNK_MIN
8835 && curr_pool->size < S390_POOL_CHUNK_MIN
8836 && !section_switch_p)
8837 continue;
8839 /* Pool chunks can only be inserted after BARRIERs ... */
8840 if (BARRIER_P (insn))
8842 s390_end_pool (curr_pool, insn);
8843 curr_pool = NULL;
8844 extra_size = 0;
8847 /* ... so if we don't find one in time, create one. */
8848 else if (chunk_size > S390_POOL_CHUNK_MAX
8849 || curr_pool->size > S390_POOL_CHUNK_MAX
8850 || section_switch_p)
8852 rtx_insn *label, *jump, *barrier, *next, *prev;
8854 if (!section_switch_p)
8856 /* We can insert the barrier only after a 'real' insn. */
8857 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8858 continue;
8859 if (get_attr_length (insn) == 0)
8860 continue;
8861 /* Don't separate LTREL_BASE from the corresponding
8862 LTREL_OFFSET load. */
8863 if (pending_ltrel)
8864 continue;
8865 next = insn;
8868 insn = next;
8869 next = NEXT_INSN (insn);
8871 while (next
8872 && NOTE_P (next)
8873 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8874 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8876 else
8878 gcc_assert (!pending_ltrel);
8880 /* The old pool has to end before the section switch
8881 note in order to make it part of the current
8882 section. */
8883 insn = PREV_INSN (insn);
8886 label = gen_label_rtx ();
8887 prev = insn;
8888 if (prev && NOTE_P (prev))
8889 prev = prev_nonnote_insn (prev);
8890 if (prev)
8891 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8892 INSN_LOCATION (prev));
8893 else
8894 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8895 barrier = emit_barrier_after (jump);
8896 insn = emit_label_after (label, barrier);
8897 JUMP_LABEL (jump) = label;
8898 LABEL_NUSES (label) = 1;
8900 INSN_ADDRESSES_NEW (jump, -1);
8901 INSN_ADDRESSES_NEW (barrier, -1);
8902 INSN_ADDRESSES_NEW (insn, -1);
8904 s390_end_pool (curr_pool, barrier);
8905 curr_pool = NULL;
8906 extra_size = 0;
8911 if (curr_pool)
8912 s390_end_pool (curr_pool, NULL);
8913 gcc_assert (!pending_ltrel);
8915 /* Find all labels that are branched into
8916 from an insn belonging to a different chunk. */
8918 far_labels = BITMAP_ALLOC (NULL);
8920 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8922 rtx_jump_table_data *table;
8924 /* Labels marked with LABEL_PRESERVE_P can be target
8925 of non-local jumps, so we have to mark them.
8926 The same holds for named labels.
8928 Don't do that, however, if it is the label before
8929 a jump table. */
8931 if (LABEL_P (insn)
8932 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8934 rtx_insn *vec_insn = NEXT_INSN (insn);
8935 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8936 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8938 /* Check potential targets in a table jump (casesi_jump). */
8939 else if (tablejump_p (insn, NULL, &table))
8941 rtx vec_pat = PATTERN (table);
8942 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8944 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8946 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8948 if (s390_find_pool (pool_list, label)
8949 != s390_find_pool (pool_list, insn))
8950 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8953 /* If we have a direct jump (conditional or unconditional),
8954 check all potential targets. */
8955 else if (JUMP_P (insn))
8957 rtx pat = PATTERN (insn);
8959 if (GET_CODE (pat) == PARALLEL)
8960 pat = XVECEXP (pat, 0, 0);
8962 if (GET_CODE (pat) == SET)
8964 rtx label = JUMP_LABEL (insn);
8965 if (label && !ANY_RETURN_P (label))
8967 if (s390_find_pool (pool_list, label)
8968 != s390_find_pool (pool_list, insn))
8969 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8975 /* Insert base register reload insns before every pool. */
8977 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8979 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8980 curr_pool->label);
8981 rtx_insn *insn = curr_pool->first_insn;
8982 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8985 /* Insert base register reload insns at every far label. */
8987 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8988 if (LABEL_P (insn)
8989 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8991 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8992 if (pool)
8994 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8995 pool->label);
8996 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9001 BITMAP_FREE (far_labels);
9004 /* Recompute insn addresses. */
9006 init_insn_lengths ();
9007 shorten_branches (get_insns ());
9009 return pool_list;
9012 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9013 After we have decided to use this list, finish implementing
9014 all changes to the current function as required. */
9016 static void
9017 s390_chunkify_finish (struct constant_pool *pool_list)
9019 struct constant_pool *curr_pool = NULL;
9020 rtx_insn *insn;
9023 /* Replace all literal pool references. */
9025 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9027 if (INSN_P (insn))
9028 replace_ltrel_base (&PATTERN (insn));
9030 curr_pool = s390_find_pool (pool_list, insn);
9031 if (!curr_pool)
9032 continue;
9034 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9036 rtx addr, pool_ref = NULL_RTX;
9037 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9038 if (pool_ref)
9040 if (s390_execute_label (insn))
9041 addr = s390_find_execute (curr_pool, insn);
9042 else
9043 addr = s390_find_constant (curr_pool,
9044 get_pool_constant (pool_ref),
9045 get_pool_mode (pool_ref));
9047 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9048 INSN_CODE (insn) = -1;
9053 /* Dump out all literal pools. */
9055 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9056 s390_dump_pool (curr_pool, 0);
9058 /* Free pool list. */
9060 while (pool_list)
9062 struct constant_pool *next = pool_list->next;
9063 s390_free_pool (pool_list);
9064 pool_list = next;
9068 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9069 We have decided we cannot use this list, so revert all changes
9070 to the current function that were done by s390_chunkify_start. */
9072 static void
9073 s390_chunkify_cancel (struct constant_pool *pool_list)
9075 struct constant_pool *curr_pool = NULL;
9076 rtx_insn *insn;
9078 /* Remove all pool placeholder insns. */
9080 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9082 /* Did we insert an extra barrier? Remove it. */
9083 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9084 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9085 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9087 if (jump && JUMP_P (jump)
9088 && barrier && BARRIER_P (barrier)
9089 && label && LABEL_P (label)
9090 && GET_CODE (PATTERN (jump)) == SET
9091 && SET_DEST (PATTERN (jump)) == pc_rtx
9092 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9093 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9095 remove_insn (jump);
9096 remove_insn (barrier);
9097 remove_insn (label);
9100 remove_insn (curr_pool->pool_insn);
9103 /* Remove all base register reload insns. */
9105 for (insn = get_insns (); insn; )
9107 rtx_insn *next_insn = NEXT_INSN (insn);
9109 if (NONJUMP_INSN_P (insn)
9110 && GET_CODE (PATTERN (insn)) == SET
9111 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9112 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9113 remove_insn (insn);
9115 insn = next_insn;
9118 /* Free pool list. */
9120 while (pool_list)
9122 struct constant_pool *next = pool_list->next;
9123 s390_free_pool (pool_list);
9124 pool_list = next;
9128 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9130 void
9131 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9133 switch (GET_MODE_CLASS (mode))
9135 case MODE_FLOAT:
9136 case MODE_DECIMAL_FLOAT:
9137 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9139 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
9140 break;
9142 case MODE_INT:
9143 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9144 mark_symbol_refs_as_used (exp);
9145 break;
9147 case MODE_VECTOR_INT:
9148 case MODE_VECTOR_FLOAT:
9150 int i;
9151 machine_mode inner_mode;
9152 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9154 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9155 for (i = 0; i < XVECLEN (exp, 0); i++)
9156 s390_output_pool_entry (XVECEXP (exp, 0, i),
9157 inner_mode,
9158 i == 0
9159 ? align
9160 : GET_MODE_BITSIZE (inner_mode));
9162 break;
9164 default:
9165 gcc_unreachable ();
9170 /* Return an RTL expression representing the value of the return address
9171 for the frame COUNT steps up from the current frame. FRAME is the
9172 frame pointer of that frame. */
9175 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9177 int offset;
9178 rtx addr;
9180 /* Without backchain, we fail for all but the current frame. */
9182 if (!TARGET_BACKCHAIN && count > 0)
9183 return NULL_RTX;
9185 /* For the current frame, we need to make sure the initial
9186 value of RETURN_REGNUM is actually saved. */
9188 if (count == 0)
9190 /* On non-z architectures branch splitting could overwrite r14. */
9191 if (TARGET_CPU_ZARCH)
9192 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9193 else
9195 cfun_frame_layout.save_return_addr_p = true;
9196 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9200 if (TARGET_PACKED_STACK)
9201 offset = -2 * UNITS_PER_LONG;
9202 else
9203 offset = RETURN_REGNUM * UNITS_PER_LONG;
9205 addr = plus_constant (Pmode, frame, offset);
9206 addr = memory_address (Pmode, addr);
9207 return gen_rtx_MEM (Pmode, addr);
9210 /* Return an RTL expression representing the back chain stored in
9211 the current stack frame. */
9214 s390_back_chain_rtx (void)
9216 rtx chain;
9218 gcc_assert (TARGET_BACKCHAIN);
9220 if (TARGET_PACKED_STACK)
9221 chain = plus_constant (Pmode, stack_pointer_rtx,
9222 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9223 else
9224 chain = stack_pointer_rtx;
9226 chain = gen_rtx_MEM (Pmode, chain);
9227 return chain;
9230 /* Find first call clobbered register unused in a function.
9231 This could be used as base register in a leaf function
9232 or for holding the return address before epilogue. */
9234 static int
9235 find_unused_clobbered_reg (void)
9237 int i;
9238 for (i = 0; i < 6; i++)
9239 if (!df_regs_ever_live_p (i))
9240 return i;
9241 return 0;
9245 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9246 clobbered hard regs in SETREG. */
9248 static void
9249 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9251 char *regs_ever_clobbered = (char *)data;
9252 unsigned int i, regno;
9253 machine_mode mode = GET_MODE (setreg);
9255 if (GET_CODE (setreg) == SUBREG)
9257 rtx inner = SUBREG_REG (setreg);
9258 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9259 return;
9260 regno = subreg_regno (setreg);
9262 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9263 regno = REGNO (setreg);
9264 else
9265 return;
9267 for (i = regno;
9268 i < regno + HARD_REGNO_NREGS (regno, mode);
9269 i++)
9270 regs_ever_clobbered[i] = 1;
9273 /* Walks through all basic blocks of the current function looking
9274 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9275 of the passed integer array REGS_EVER_CLOBBERED are set to one for
9276 each of those regs. */
9278 static void
9279 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9281 basic_block cur_bb;
9282 rtx_insn *cur_insn;
9283 unsigned int i;
9285 memset (regs_ever_clobbered, 0, 32);
9287 /* For non-leaf functions we have to consider all call clobbered regs to be
9288 clobbered. */
9289 if (!crtl->is_leaf)
9291 for (i = 0; i < 32; i++)
9292 regs_ever_clobbered[i] = call_really_used_regs[i];
9295 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9296 this work is done by liveness analysis (mark_regs_live_at_end).
9297 Special care is needed for functions containing landing pads. Landing pads
9298 may use the eh registers, but the code which sets these registers is not
9299 contained in that function. Hence s390_regs_ever_clobbered is not able to
9300 deal with this automatically. */
9301 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9302 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9303 if (crtl->calls_eh_return
9304 || (cfun->machine->has_landing_pad_p
9305 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9306 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9308 /* For nonlocal gotos all call-saved registers have to be saved.
9309 This flag is also set for the unwinding code in libgcc.
9310 See expand_builtin_unwind_init. For regs_ever_live this is done by
9311 reload. */
9312 if (crtl->saves_all_registers)
9313 for (i = 0; i < 32; i++)
9314 if (!call_really_used_regs[i])
9315 regs_ever_clobbered[i] = 1;
9317 FOR_EACH_BB_FN (cur_bb, cfun)
9319 FOR_BB_INSNS (cur_bb, cur_insn)
9321 rtx pat;
9323 if (!INSN_P (cur_insn))
9324 continue;
9326 pat = PATTERN (cur_insn);
9328 /* Ignore GPR restore insns. */
9329 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9331 if (GET_CODE (pat) == SET
9332 && GENERAL_REG_P (SET_DEST (pat)))
9334 /* lgdr */
9335 if (GET_MODE (SET_SRC (pat)) == DImode
9336 && FP_REG_P (SET_SRC (pat)))
9337 continue;
9339 /* l / lg */
9340 if (GET_CODE (SET_SRC (pat)) == MEM)
9341 continue;
9344 /* lm / lmg */
9345 if (GET_CODE (pat) == PARALLEL
9346 && load_multiple_operation (pat, VOIDmode))
9347 continue;
9350 note_stores (pat,
9351 s390_reg_clobbered_rtx,
9352 regs_ever_clobbered);
9357 /* Determine the frame area which actually has to be accessed
9358 in the function epilogue. The values are stored at the
9359 given pointers AREA_BOTTOM (address of the lowest used stack
9360 address) and AREA_TOP (address of the first item which does
9361 not belong to the stack frame). */
9363 static void
9364 s390_frame_area (int *area_bottom, int *area_top)
9366 int b, t;
9368 b = INT_MAX;
9369 t = INT_MIN;
9371 if (cfun_frame_layout.first_restore_gpr != -1)
9373 b = (cfun_frame_layout.gprs_offset
9374 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9375 t = b + (cfun_frame_layout.last_restore_gpr
9376 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9379 if (TARGET_64BIT && cfun_save_high_fprs_p)
9381 b = MIN (b, cfun_frame_layout.f8_offset);
9382 t = MAX (t, (cfun_frame_layout.f8_offset
9383 + cfun_frame_layout.high_fprs * 8));
9386 if (!TARGET_64BIT)
9388 if (cfun_fpr_save_p (FPR4_REGNUM))
9390 b = MIN (b, cfun_frame_layout.f4_offset);
9391 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9393 if (cfun_fpr_save_p (FPR6_REGNUM))
9395 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9396 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9399 *area_bottom = b;
9400 *area_top = t;
9402 /* Update gpr_save_slots in the frame layout trying to make use of
9403 FPRs as GPR save slots.
9404 This is a helper routine of s390_register_info. */
9406 static void
9407 s390_register_info_gprtofpr ()
9409 int save_reg_slot = FPR0_REGNUM;
9410 int i, j;
9412 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9413 return;
9415 for (i = 15; i >= 6; i--)
9417 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9418 continue;
9420 /* Advance to the next FP register which can be used as a
9421 GPR save slot. */
9422 while ((!call_really_used_regs[save_reg_slot]
9423 || df_regs_ever_live_p (save_reg_slot)
9424 || cfun_fpr_save_p (save_reg_slot))
9425 && FP_REGNO_P (save_reg_slot))
9426 save_reg_slot++;
9427 if (!FP_REGNO_P (save_reg_slot))
9429 /* We only want to use ldgr/lgdr if we can get rid of
9430 stm/lm entirely. So undo the gpr slot allocation in
9431 case we ran out of FPR save slots. */
9432 for (j = 6; j <= 15; j++)
9433 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9434 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9435 break;
9437 cfun_gpr_save_slot (i) = save_reg_slot++;
9441 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9442 stdarg.
9443 This is a helper routine for s390_register_info. */
9445 static void
9446 s390_register_info_stdarg_fpr ()
9448 int i;
9449 int min_fpr;
9450 int max_fpr;
9452 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9453 f0-f4 for 64 bit. */
9454 if (!cfun->stdarg
9455 || !TARGET_HARD_FLOAT
9456 || !cfun->va_list_fpr_size
9457 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9458 return;
9460 min_fpr = crtl->args.info.fprs;
9461 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9462 if (max_fpr >= FP_ARG_NUM_REG)
9463 max_fpr = FP_ARG_NUM_REG - 1;
9465 /* FPR argument regs start at f0. */
9466 min_fpr += FPR0_REGNUM;
9467 max_fpr += FPR0_REGNUM;
9469 for (i = min_fpr; i <= max_fpr; i++)
9470 cfun_set_fpr_save (i);
9473 /* Reserve the GPR save slots for GPRs which need to be saved due to
9474 stdarg.
9475 This is a helper routine for s390_register_info. */
9477 static void
9478 s390_register_info_stdarg_gpr ()
9480 int i;
9481 int min_gpr;
9482 int max_gpr;
9484 if (!cfun->stdarg
9485 || !cfun->va_list_gpr_size
9486 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9487 return;
9489 min_gpr = crtl->args.info.gprs;
9490 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9491 if (max_gpr >= GP_ARG_NUM_REG)
9492 max_gpr = GP_ARG_NUM_REG - 1;
9494 /* GPR argument regs start at r2. */
9495 min_gpr += GPR2_REGNUM;
9496 max_gpr += GPR2_REGNUM;
9498 /* If r6 was supposed to be saved into an FPR and now needs to go to
9499 the stack for vararg we have to adjust the restore range to make
9500 sure that the restore is done from stack as well. */
9501 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9502 && min_gpr <= GPR6_REGNUM
9503 && max_gpr >= GPR6_REGNUM)
9505 if (cfun_frame_layout.first_restore_gpr == -1
9506 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9507 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9508 if (cfun_frame_layout.last_restore_gpr == -1
9509 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9510 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9513 if (cfun_frame_layout.first_save_gpr == -1
9514 || cfun_frame_layout.first_save_gpr > min_gpr)
9515 cfun_frame_layout.first_save_gpr = min_gpr;
9517 if (cfun_frame_layout.last_save_gpr == -1
9518 || cfun_frame_layout.last_save_gpr < max_gpr)
9519 cfun_frame_layout.last_save_gpr = max_gpr;
9521 for (i = min_gpr; i <= max_gpr; i++)
9522 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9525 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9526 prologue and epilogue. */
9528 static void
9529 s390_register_info_set_ranges ()
9531 int i, j;
9533 /* Find the first and the last save slot supposed to use the stack
9534 to set the restore range.
9535 Vararg regs might be marked as save to stack but only the
9536 call-saved regs really need restoring (i.e. r6). This code
9537 assumes that the vararg regs have not yet been recorded in
9538 cfun_gpr_save_slot. */
9539 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9540 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9541 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9542 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9543 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9544 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9547 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9548 for registers which need to be saved in function prologue.
9549 This function can be used until the insns emitted for save/restore
9550 of the regs are visible in the RTL stream. */
9552 static void
9553 s390_register_info ()
9555 int i;
9556 char clobbered_regs[32];
9558 gcc_assert (!epilogue_completed);
9560 if (reload_completed)
9561 /* After reload we rely on our own routine to determine which
9562 registers need saving. */
9563 s390_regs_ever_clobbered (clobbered_regs);
9564 else
9565 /* During reload we use regs_ever_live as a base since reload
9566 does changes in there which we otherwise would not be aware
9567 of. */
9568 for (i = 0; i < 32; i++)
9569 clobbered_regs[i] = df_regs_ever_live_p (i);
9571 for (i = 0; i < 32; i++)
9572 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9574 /* Mark the call-saved FPRs which need to be saved.
9575 This needs to be done before checking the special GPRs since the
9576 stack pointer usage depends on whether high FPRs have to be saved
9577 or not. */
9578 cfun_frame_layout.fpr_bitmap = 0;
9579 cfun_frame_layout.high_fprs = 0;
9580 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9581 if (clobbered_regs[i] && !call_really_used_regs[i])
9583 cfun_set_fpr_save (i);
9584 if (i >= FPR8_REGNUM)
9585 cfun_frame_layout.high_fprs++;
9588 /* Register 12 is used for GOT address, but also as temp in prologue
9589 for split-stack stdarg functions (unless r14 is available). */
9590 clobbered_regs[12]
9591 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9592 || (flag_split_stack && cfun->stdarg
9593 && (crtl->is_leaf || TARGET_TPF_PROFILING
9594 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9596 clobbered_regs[BASE_REGNUM]
9597 |= (cfun->machine->base_reg
9598 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9600 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9601 |= !!frame_pointer_needed;
9603 /* On pre z900 machines this might take until machine dependent
9604 reorg to decide.
9605 save_return_addr_p will only be set on non-zarch machines so
9606 there is no risk that r14 goes into an FPR instead of a stack
9607 slot. */
9608 clobbered_regs[RETURN_REGNUM]
9609 |= (!crtl->is_leaf
9610 || TARGET_TPF_PROFILING
9611 || cfun->machine->split_branches_pending_p
9612 || cfun_frame_layout.save_return_addr_p
9613 || crtl->calls_eh_return);
9615 clobbered_regs[STACK_POINTER_REGNUM]
9616 |= (!crtl->is_leaf
9617 || TARGET_TPF_PROFILING
9618 || cfun_save_high_fprs_p
9619 || get_frame_size () > 0
9620 || (reload_completed && cfun_frame_layout.frame_size > 0)
9621 || cfun->calls_alloca);
9623 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9625 for (i = 6; i < 16; i++)
9626 if (clobbered_regs[i])
9627 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9629 s390_register_info_stdarg_fpr ();
9630 s390_register_info_gprtofpr ();
9631 s390_register_info_set_ranges ();
9632 /* stdarg functions might need to save GPRs 2 to 6. This might
9633 override the GPR->FPR save decision made by
9634 s390_register_info_gprtofpr for r6 since vararg regs must go to
9635 the stack. */
9636 s390_register_info_stdarg_gpr ();
9639 /* This function is called by s390_optimize_prologue in order to get
9640 rid of unnecessary GPR save/restore instructions. The register info
9641 for the GPRs is re-computed and the ranges are re-calculated. */
9643 static void
9644 s390_optimize_register_info ()
9646 char clobbered_regs[32];
9647 int i;
9649 gcc_assert (epilogue_completed);
9650 gcc_assert (!cfun->machine->split_branches_pending_p);
9652 s390_regs_ever_clobbered (clobbered_regs);
9654 for (i = 0; i < 32; i++)
9655 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9657 /* There is still special treatment needed for cases invisible to
9658 s390_regs_ever_clobbered. */
9659 clobbered_regs[RETURN_REGNUM]
9660 |= (TARGET_TPF_PROFILING
9661 /* When expanding builtin_return_addr in ESA mode we do not
9662 know whether r14 will later be needed as scratch reg when
9663 doing branch splitting. So the builtin always accesses the
9664 r14 save slot and we need to stick to the save/restore
9665 decision for r14 even if it turns out that it didn't get
9666 clobbered. */
9667 || cfun_frame_layout.save_return_addr_p
9668 || crtl->calls_eh_return);
9670 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9672 for (i = 6; i < 16; i++)
9673 if (!clobbered_regs[i])
9674 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9676 s390_register_info_set_ranges ();
9677 s390_register_info_stdarg_gpr ();
9680 /* Fill cfun->machine with info about frame of current function. */
9682 static void
9683 s390_frame_info (void)
9685 HOST_WIDE_INT lowest_offset;
9687 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9688 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9690 /* The va_arg builtin uses a constant distance of 16 *
9691 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9692 pointer. So even if we are going to save the stack pointer in an
9693 FPR we need the stack space in order to keep the offsets
9694 correct. */
9695 if (cfun->stdarg && cfun_save_arg_fprs_p)
9697 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9699 if (cfun_frame_layout.first_save_gpr_slot == -1)
9700 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9703 cfun_frame_layout.frame_size = get_frame_size ();
9704 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9705 fatal_error (input_location,
9706 "total size of local variables exceeds architecture limit");
9708 if (!TARGET_PACKED_STACK)
9710 /* Fixed stack layout. */
9711 cfun_frame_layout.backchain_offset = 0;
9712 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9713 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9714 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9715 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9716 * UNITS_PER_LONG);
9718 else if (TARGET_BACKCHAIN)
9720 /* Kernel stack layout - packed stack, backchain, no float */
9721 gcc_assert (TARGET_SOFT_FLOAT);
9722 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9723 - UNITS_PER_LONG);
9725 /* The distance between the backchain and the return address
9726 save slot must not change. So we always need a slot for the
9727 stack pointer which resides in between. */
9728 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9730 cfun_frame_layout.gprs_offset
9731 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9733 /* FPRs will not be saved. Nevertheless pick sane values to
9734 keep area calculations valid. */
9735 cfun_frame_layout.f0_offset =
9736 cfun_frame_layout.f4_offset =
9737 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9739 else
9741 int num_fprs;
9743 /* Packed stack layout without backchain. */
9745 /* With stdarg FPRs need their dedicated slots. */
9746 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9747 : (cfun_fpr_save_p (FPR4_REGNUM) +
9748 cfun_fpr_save_p (FPR6_REGNUM)));
9749 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9751 num_fprs = (cfun->stdarg ? 2
9752 : (cfun_fpr_save_p (FPR0_REGNUM)
9753 + cfun_fpr_save_p (FPR2_REGNUM)));
9754 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9756 cfun_frame_layout.gprs_offset
9757 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9759 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9760 - cfun_frame_layout.high_fprs * 8);
9763 if (cfun_save_high_fprs_p)
9764 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9766 if (!crtl->is_leaf)
9767 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9769 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9770 sized area at the bottom of the stack. This is required also for
9771 leaf functions. When GCC generates a local stack reference it
9772 will always add STACK_POINTER_OFFSET to all these references. */
9773 if (crtl->is_leaf
9774 && !TARGET_TPF_PROFILING
9775 && cfun_frame_layout.frame_size == 0
9776 && !cfun->calls_alloca)
9777 return;
9779 /* Calculate the number of bytes we have used in our own register
9780 save area. With the packed stack layout we can re-use the
9781 remaining bytes for normal stack elements. */
9783 if (TARGET_PACKED_STACK)
9784 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9785 cfun_frame_layout.f4_offset),
9786 cfun_frame_layout.gprs_offset);
9787 else
9788 lowest_offset = 0;
9790 if (TARGET_BACKCHAIN)
9791 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9793 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9795 /* If under 31 bit an odd number of gprs has to be saved we have to
9796 adjust the frame size to sustain 8 byte alignment of stack
9797 frames. */
9798 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9799 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9800 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9803 /* Generate frame layout. Fills in register and frame data for the current
9804 function in cfun->machine. This routine can be called multiple times;
9805 it will re-do the complete frame layout every time. */
9807 static void
9808 s390_init_frame_layout (void)
9810 HOST_WIDE_INT frame_size;
9811 int base_used;
9813 /* After LRA the frame layout is supposed to be read-only and should
9814 not be re-computed. */
9815 if (reload_completed)
9816 return;
9818 /* On S/390 machines, we may need to perform branch splitting, which
9819 will require both base and return address register. We have no
9820 choice but to assume we're going to need them until right at the
9821 end of the machine dependent reorg phase. */
9822 if (!TARGET_CPU_ZARCH)
9823 cfun->machine->split_branches_pending_p = true;
9827 frame_size = cfun_frame_layout.frame_size;
9829 /* Try to predict whether we'll need the base register. */
9830 base_used = cfun->machine->split_branches_pending_p
9831 || crtl->uses_const_pool
9832 || (!DISP_IN_RANGE (frame_size)
9833 && !CONST_OK_FOR_K (frame_size));
9835 /* Decide which register to use as literal pool base. In small
9836 leaf functions, try to use an unused call-clobbered register
9837 as base register to avoid save/restore overhead. */
9838 if (!base_used)
9839 cfun->machine->base_reg = NULL_RTX;
9840 else
9842 int br = 0;
9844 if (crtl->is_leaf)
9845 /* Prefer r5 (most likely to be free). */
9846 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9848 cfun->machine->base_reg =
9849 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9852 s390_register_info ();
9853 s390_frame_info ();
9855 while (frame_size != cfun_frame_layout.frame_size);
9858 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9859 the TX is nonescaping. A transaction is considered escaping if
9860 there is at least one path from tbegin returning CC0 to the
9861 function exit block without an tend.
9863 The check so far has some limitations:
9864 - only single tbegin/tend BBs are supported
9865 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9866 - when CC is copied to a GPR and the CC0 check is done with the GPR
9867 this is not supported
9870 static void
9871 s390_optimize_nonescaping_tx (void)
9873 const unsigned int CC0 = 1 << 3;
9874 basic_block tbegin_bb = NULL;
9875 basic_block tend_bb = NULL;
9876 basic_block bb;
9877 rtx_insn *insn;
9878 bool result = true;
9879 int bb_index;
9880 rtx_insn *tbegin_insn = NULL;
9882 if (!cfun->machine->tbegin_p)
9883 return;
9885 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9887 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9889 if (!bb)
9890 continue;
9892 FOR_BB_INSNS (bb, insn)
9894 rtx ite, cc, pat, target;
9895 unsigned HOST_WIDE_INT mask;
9897 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9898 continue;
9900 pat = PATTERN (insn);
9902 if (GET_CODE (pat) == PARALLEL)
9903 pat = XVECEXP (pat, 0, 0);
9905 if (GET_CODE (pat) != SET
9906 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9907 continue;
9909 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9911 rtx_insn *tmp;
9913 tbegin_insn = insn;
9915 /* Just return if the tbegin doesn't have clobbers. */
9916 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9917 return;
9919 if (tbegin_bb != NULL)
9920 return;
9922 /* Find the next conditional jump. */
9923 for (tmp = NEXT_INSN (insn);
9924 tmp != NULL_RTX;
9925 tmp = NEXT_INSN (tmp))
9927 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9928 return;
9929 if (!JUMP_P (tmp))
9930 continue;
9932 ite = SET_SRC (PATTERN (tmp));
9933 if (GET_CODE (ite) != IF_THEN_ELSE)
9934 continue;
9936 cc = XEXP (XEXP (ite, 0), 0);
9937 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9938 || GET_MODE (cc) != CCRAWmode
9939 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9940 return;
9942 if (bb->succs->length () != 2)
9943 return;
9945 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9946 if (GET_CODE (XEXP (ite, 0)) == NE)
9947 mask ^= 0xf;
9949 if (mask == CC0)
9950 target = XEXP (ite, 1);
9951 else if (mask == (CC0 ^ 0xf))
9952 target = XEXP (ite, 2);
9953 else
9954 return;
9957 edge_iterator ei;
9958 edge e1, e2;
9960 ei = ei_start (bb->succs);
9961 e1 = ei_safe_edge (ei);
9962 ei_next (&ei);
9963 e2 = ei_safe_edge (ei);
9965 if (e2->flags & EDGE_FALLTHRU)
9967 e2 = e1;
9968 e1 = ei_safe_edge (ei);
9971 if (!(e1->flags & EDGE_FALLTHRU))
9972 return;
9974 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9976 if (tmp == BB_END (bb))
9977 break;
9981 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9983 if (tend_bb != NULL)
9984 return;
9985 tend_bb = bb;
9990 /* Either we successfully remove the FPR clobbers here or we are not
9991 able to do anything for this TX. Both cases don't qualify for
9992 another look. */
9993 cfun->machine->tbegin_p = false;
9995 if (tbegin_bb == NULL || tend_bb == NULL)
9996 return;
9998 calculate_dominance_info (CDI_POST_DOMINATORS);
9999 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10000 free_dominance_info (CDI_POST_DOMINATORS);
10002 if (!result)
10003 return;
10005 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10006 gen_rtvec (2,
10007 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10008 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10009 INSN_CODE (tbegin_insn) = -1;
10010 df_insn_rescan (tbegin_insn);
10012 return;
10015 /* Return true if it is legal to put a value with MODE into REGNO. */
10017 bool
10018 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10020 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10021 return false;
10023 switch (REGNO_REG_CLASS (regno))
10025 case VEC_REGS:
10026 return ((GET_MODE_CLASS (mode) == MODE_INT
10027 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10028 || mode == DFmode
10029 || s390_vector_mode_supported_p (mode));
10030 break;
10031 case FP_REGS:
10032 if (TARGET_VX
10033 && ((GET_MODE_CLASS (mode) == MODE_INT
10034 && s390_class_max_nregs (FP_REGS, mode) == 1)
10035 || mode == DFmode
10036 || s390_vector_mode_supported_p (mode)))
10037 return true;
10039 if (REGNO_PAIR_OK (regno, mode))
10041 if (mode == SImode || mode == DImode)
10042 return true;
10044 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10045 return true;
10047 break;
10048 case ADDR_REGS:
10049 if (FRAME_REGNO_P (regno) && mode == Pmode)
10050 return true;
10052 /* fallthrough */
10053 case GENERAL_REGS:
10054 if (REGNO_PAIR_OK (regno, mode))
10056 if (TARGET_ZARCH
10057 || (mode != TFmode && mode != TCmode && mode != TDmode))
10058 return true;
10060 break;
10061 case CC_REGS:
10062 if (GET_MODE_CLASS (mode) == MODE_CC)
10063 return true;
10064 break;
10065 case ACCESS_REGS:
10066 if (REGNO_PAIR_OK (regno, mode))
10068 if (mode == SImode || mode == Pmode)
10069 return true;
10071 break;
10072 default:
10073 return false;
10076 return false;
10079 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10081 bool
10082 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10084 /* Once we've decided upon a register to use as base register, it must
10085 no longer be used for any other purpose. */
10086 if (cfun->machine->base_reg)
10087 if (REGNO (cfun->machine->base_reg) == old_reg
10088 || REGNO (cfun->machine->base_reg) == new_reg)
10089 return false;
10091 /* Prevent regrename from using call-saved regs which haven't
10092 actually been saved. This is necessary since regrename assumes
10093 the backend save/restore decisions are based on
10094 df_regs_ever_live. Since we have our own routine we have to tell
10095 regrename manually about it. */
10096 if (GENERAL_REGNO_P (new_reg)
10097 && !call_really_used_regs[new_reg]
10098 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10099 return false;
10101 return true;
10104 /* Return nonzero if register REGNO can be used as a scratch register
10105 in peephole2. */
10107 static bool
10108 s390_hard_regno_scratch_ok (unsigned int regno)
10110 /* See s390_hard_regno_rename_ok. */
10111 if (GENERAL_REGNO_P (regno)
10112 && !call_really_used_regs[regno]
10113 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10114 return false;
10116 return true;
10119 /* Maximum number of registers to represent a value of mode MODE
10120 in a register of class RCLASS. */
10123 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10125 int reg_size;
10126 bool reg_pair_required_p = false;
10128 switch (rclass)
10130 case FP_REGS:
10131 case VEC_REGS:
10132 reg_size = TARGET_VX ? 16 : 8;
10134 /* TF and TD modes would fit into a VR but we put them into a
10135 register pair since we do not have 128bit FP instructions on
10136 full VRs. */
10137 if (TARGET_VX
10138 && SCALAR_FLOAT_MODE_P (mode)
10139 && GET_MODE_SIZE (mode) >= 16)
10140 reg_pair_required_p = true;
10142 /* Even if complex types would fit into a single FPR/VR we force
10143 them into a register pair to deal with the parts more easily.
10144 (FIXME: What about complex ints?) */
10145 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10146 reg_pair_required_p = true;
10147 break;
10148 case ACCESS_REGS:
10149 reg_size = 4;
10150 break;
10151 default:
10152 reg_size = UNITS_PER_WORD;
10153 break;
10156 if (reg_pair_required_p)
10157 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10159 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10162 /* Return TRUE if changing mode from FROM to TO should not be allowed
10163 for register class CLASS. */
10166 s390_cannot_change_mode_class (machine_mode from_mode,
10167 machine_mode to_mode,
10168 enum reg_class rclass)
10170 machine_mode small_mode;
10171 machine_mode big_mode;
10173 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10174 return 0;
10176 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10178 small_mode = from_mode;
10179 big_mode = to_mode;
10181 else
10183 small_mode = to_mode;
10184 big_mode = from_mode;
10187 /* Values residing in VRs are little-endian style. All modes are
10188 placed left-aligned in an VR. This means that we cannot allow
10189 switching between modes with differing sizes. Also if the vector
10190 facility is available we still place TFmode values in VR register
10191 pairs, since the only instructions we have operating on TFmodes
10192 only deal with register pairs. Therefore we have to allow DFmode
10193 subregs of TFmodes to enable the TFmode splitters. */
10194 if (reg_classes_intersect_p (VEC_REGS, rclass)
10195 && (GET_MODE_SIZE (small_mode) < 8
10196 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10197 return 1;
10199 /* Likewise for access registers, since they have only half the
10200 word size on 64-bit. */
10201 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10202 return 1;
10204 return 0;
10207 /* Return true if we use LRA instead of reload pass. */
10208 static bool
10209 s390_lra_p (void)
10211 return s390_lra_flag;
10214 /* Return true if register FROM can be eliminated via register TO. */
10216 static bool
10217 s390_can_eliminate (const int from, const int to)
10219 /* On zSeries machines, we have not marked the base register as fixed.
10220 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10221 If a function requires the base register, we say here that this
10222 elimination cannot be performed. This will cause reload to free
10223 up the base register (as if it were fixed). On the other hand,
10224 if the current function does *not* require the base register, we
10225 say here the elimination succeeds, which in turn allows reload
10226 to allocate the base register for any other purpose. */
10227 if (from == BASE_REGNUM && to == BASE_REGNUM)
10229 if (TARGET_CPU_ZARCH)
10231 s390_init_frame_layout ();
10232 return cfun->machine->base_reg == NULL_RTX;
10235 return false;
10238 /* Everything else must point into the stack frame. */
10239 gcc_assert (to == STACK_POINTER_REGNUM
10240 || to == HARD_FRAME_POINTER_REGNUM);
10242 gcc_assert (from == FRAME_POINTER_REGNUM
10243 || from == ARG_POINTER_REGNUM
10244 || from == RETURN_ADDRESS_POINTER_REGNUM);
10246 /* Make sure we actually saved the return address. */
10247 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10248 if (!crtl->calls_eh_return
10249 && !cfun->stdarg
10250 && !cfun_frame_layout.save_return_addr_p)
10251 return false;
10253 return true;
10256 /* Return offset between register FROM and TO initially after prolog. */
10258 HOST_WIDE_INT
10259 s390_initial_elimination_offset (int from, int to)
10261 HOST_WIDE_INT offset;
10263 /* ??? Why are we called for non-eliminable pairs? */
10264 if (!s390_can_eliminate (from, to))
10265 return 0;
10267 switch (from)
10269 case FRAME_POINTER_REGNUM:
10270 offset = (get_frame_size()
10271 + STACK_POINTER_OFFSET
10272 + crtl->outgoing_args_size);
10273 break;
10275 case ARG_POINTER_REGNUM:
10276 s390_init_frame_layout ();
10277 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10278 break;
10280 case RETURN_ADDRESS_POINTER_REGNUM:
10281 s390_init_frame_layout ();
10283 if (cfun_frame_layout.first_save_gpr_slot == -1)
10285 /* If it turns out that for stdarg nothing went into the reg
10286 save area we also do not need the return address
10287 pointer. */
10288 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10289 return 0;
10291 gcc_unreachable ();
10294 /* In order to make the following work it is not necessary for
10295 r14 to have a save slot. It is sufficient if one other GPR
10296 got one. Since the GPRs are always stored without gaps we
10297 are able to calculate where the r14 save slot would
10298 reside. */
10299 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10300 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10301 UNITS_PER_LONG);
10302 break;
10304 case BASE_REGNUM:
10305 offset = 0;
10306 break;
10308 default:
10309 gcc_unreachable ();
10312 return offset;
10315 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10316 to register BASE. Return generated insn. */
10318 static rtx
10319 save_fpr (rtx base, int offset, int regnum)
10321 rtx addr;
10322 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10324 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10325 set_mem_alias_set (addr, get_varargs_alias_set ());
10326 else
10327 set_mem_alias_set (addr, get_frame_alias_set ());
10329 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10332 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10333 to register BASE. Return generated insn. */
10335 static rtx
10336 restore_fpr (rtx base, int offset, int regnum)
10338 rtx addr;
10339 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10340 set_mem_alias_set (addr, get_frame_alias_set ());
10342 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10345 /* Return true if REGNO is a global register, but not one
10346 of the special ones that need to be saved/restored in anyway. */
10348 static inline bool
10349 global_not_special_regno_p (int regno)
10351 return (global_regs[regno]
10352 /* These registers are special and need to be
10353 restored in any case. */
10354 && !(regno == STACK_POINTER_REGNUM
10355 || regno == RETURN_REGNUM
10356 || regno == BASE_REGNUM
10357 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10360 /* Generate insn to save registers FIRST to LAST into
10361 the register save area located at offset OFFSET
10362 relative to register BASE. */
10364 static rtx
10365 save_gprs (rtx base, int offset, int first, int last)
10367 rtx addr, insn, note;
10368 int i;
10370 addr = plus_constant (Pmode, base, offset);
10371 addr = gen_rtx_MEM (Pmode, addr);
10373 set_mem_alias_set (addr, get_frame_alias_set ());
10375 /* Special-case single register. */
10376 if (first == last)
10378 if (TARGET_64BIT)
10379 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10380 else
10381 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10383 if (!global_not_special_regno_p (first))
10384 RTX_FRAME_RELATED_P (insn) = 1;
10385 return insn;
10389 insn = gen_store_multiple (addr,
10390 gen_rtx_REG (Pmode, first),
10391 GEN_INT (last - first + 1));
10393 if (first <= 6 && cfun->stdarg)
10394 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10396 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10398 if (first + i <= 6)
10399 set_mem_alias_set (mem, get_varargs_alias_set ());
10402 /* We need to set the FRAME_RELATED flag on all SETs
10403 inside the store-multiple pattern.
10405 However, we must not emit DWARF records for registers 2..5
10406 if they are stored for use by variable arguments ...
10408 ??? Unfortunately, it is not enough to simply not the
10409 FRAME_RELATED flags for those SETs, because the first SET
10410 of the PARALLEL is always treated as if it had the flag
10411 set, even if it does not. Therefore we emit a new pattern
10412 without those registers as REG_FRAME_RELATED_EXPR note. */
10414 if (first >= 6 && !global_not_special_regno_p (first))
10416 rtx pat = PATTERN (insn);
10418 for (i = 0; i < XVECLEN (pat, 0); i++)
10419 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10420 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10421 0, i)))))
10422 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10424 RTX_FRAME_RELATED_P (insn) = 1;
10426 else if (last >= 6)
10428 int start;
10430 for (start = first >= 6 ? first : 6; start <= last; start++)
10431 if (!global_not_special_regno_p (start))
10432 break;
10434 if (start > last)
10435 return insn;
10437 addr = plus_constant (Pmode, base,
10438 offset + (start - first) * UNITS_PER_LONG);
10440 if (start == last)
10442 if (TARGET_64BIT)
10443 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10444 gen_rtx_REG (Pmode, start));
10445 else
10446 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10447 gen_rtx_REG (Pmode, start));
10448 note = PATTERN (note);
10450 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10451 RTX_FRAME_RELATED_P (insn) = 1;
10453 return insn;
10456 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10457 gen_rtx_REG (Pmode, start),
10458 GEN_INT (last - start + 1));
10459 note = PATTERN (note);
10461 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10463 for (i = 0; i < XVECLEN (note, 0); i++)
10464 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10465 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10466 0, i)))))
10467 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10469 RTX_FRAME_RELATED_P (insn) = 1;
10472 return insn;
10475 /* Generate insn to restore registers FIRST to LAST from
10476 the register save area located at offset OFFSET
10477 relative to register BASE. */
10479 static rtx
10480 restore_gprs (rtx base, int offset, int first, int last)
10482 rtx addr, insn;
10484 addr = plus_constant (Pmode, base, offset);
10485 addr = gen_rtx_MEM (Pmode, addr);
10486 set_mem_alias_set (addr, get_frame_alias_set ());
10488 /* Special-case single register. */
10489 if (first == last)
10491 if (TARGET_64BIT)
10492 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10493 else
10494 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10496 RTX_FRAME_RELATED_P (insn) = 1;
10497 return insn;
10500 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10501 addr,
10502 GEN_INT (last - first + 1));
10503 RTX_FRAME_RELATED_P (insn) = 1;
10504 return insn;
10507 /* Return insn sequence to load the GOT register. */
10509 static GTY(()) rtx got_symbol;
10510 rtx_insn *
10511 s390_load_got (void)
10513 rtx_insn *insns;
10515 /* We cannot use pic_offset_table_rtx here since we use this
10516 function also for non-pic if __tls_get_offset is called and in
10517 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10518 aren't usable. */
10519 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10521 if (!got_symbol)
10523 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10524 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10527 start_sequence ();
10529 if (TARGET_CPU_ZARCH)
10531 emit_move_insn (got_rtx, got_symbol);
10533 else
10535 rtx offset;
10537 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10538 UNSPEC_LTREL_OFFSET);
10539 offset = gen_rtx_CONST (Pmode, offset);
10540 offset = force_const_mem (Pmode, offset);
10542 emit_move_insn (got_rtx, offset);
10544 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10545 UNSPEC_LTREL_BASE);
10546 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10548 emit_move_insn (got_rtx, offset);
10551 insns = get_insns ();
10552 end_sequence ();
10553 return insns;
10556 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10557 and the change to the stack pointer. */
10559 static void
10560 s390_emit_stack_tie (void)
10562 rtx mem = gen_frame_mem (BLKmode,
10563 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10565 emit_insn (gen_stack_tie (mem));
10568 /* Copy GPRS into FPR save slots. */
10570 static void
10571 s390_save_gprs_to_fprs (void)
10573 int i;
10575 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10576 return;
10578 for (i = 6; i < 16; i++)
10580 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10582 rtx_insn *insn =
10583 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10584 gen_rtx_REG (DImode, i));
10585 RTX_FRAME_RELATED_P (insn) = 1;
10586 /* This prevents dwarf2cfi from interpreting the set. Doing
10587 so it might emit def_cfa_register infos setting an FPR as
10588 new CFA. */
10589 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10594 /* Restore GPRs from FPR save slots. */
10596 static void
10597 s390_restore_gprs_from_fprs (void)
10599 int i;
10601 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10602 return;
10604 for (i = 6; i < 16; i++)
10606 rtx_insn *insn;
10608 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10609 continue;
10611 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10613 if (i == STACK_POINTER_REGNUM)
10614 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10615 else
10616 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10618 df_set_regs_ever_live (i, true);
10619 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10620 if (i == STACK_POINTER_REGNUM)
10621 add_reg_note (insn, REG_CFA_DEF_CFA,
10622 plus_constant (Pmode, stack_pointer_rtx,
10623 STACK_POINTER_OFFSET));
10624 RTX_FRAME_RELATED_P (insn) = 1;
10629 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10630 generation. */
10632 namespace {
10634 const pass_data pass_data_s390_early_mach =
10636 RTL_PASS, /* type */
10637 "early_mach", /* name */
10638 OPTGROUP_NONE, /* optinfo_flags */
10639 TV_MACH_DEP, /* tv_id */
10640 0, /* properties_required */
10641 0, /* properties_provided */
10642 0, /* properties_destroyed */
10643 0, /* todo_flags_start */
10644 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10647 class pass_s390_early_mach : public rtl_opt_pass
10649 public:
10650 pass_s390_early_mach (gcc::context *ctxt)
10651 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10654 /* opt_pass methods: */
10655 virtual unsigned int execute (function *);
10657 }; // class pass_s390_early_mach
10659 unsigned int
10660 pass_s390_early_mach::execute (function *fun)
10662 rtx_insn *insn;
10664 /* Try to get rid of the FPR clobbers. */
10665 s390_optimize_nonescaping_tx ();
10667 /* Re-compute register info. */
10668 s390_register_info ();
10670 /* If we're using a base register, ensure that it is always valid for
10671 the first non-prologue instruction. */
10672 if (fun->machine->base_reg)
10673 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10675 /* Annotate all constant pool references to let the scheduler know
10676 they implicitly use the base register. */
10677 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10678 if (INSN_P (insn))
10680 annotate_constant_pool_refs (&PATTERN (insn));
10681 df_insn_rescan (insn);
10683 return 0;
10686 } // anon namespace
10688 /* Expand the prologue into a bunch of separate insns. */
10690 void
10691 s390_emit_prologue (void)
10693 rtx insn, addr;
10694 rtx temp_reg;
10695 int i;
10696 int offset;
10697 int next_fpr = 0;
10699 /* Choose best register to use for temp use within prologue.
10700 TPF with profiling must avoid the register 14 - the tracing function
10701 needs the original contents of r14 to be preserved. */
10703 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10704 && !crtl->is_leaf
10705 && !TARGET_TPF_PROFILING)
10706 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10707 else if (flag_split_stack && cfun->stdarg)
10708 temp_reg = gen_rtx_REG (Pmode, 12);
10709 else
10710 temp_reg = gen_rtx_REG (Pmode, 1);
10712 s390_save_gprs_to_fprs ();
10714 /* Save call saved gprs. */
10715 if (cfun_frame_layout.first_save_gpr != -1)
10717 insn = save_gprs (stack_pointer_rtx,
10718 cfun_frame_layout.gprs_offset +
10719 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10720 - cfun_frame_layout.first_save_gpr_slot),
10721 cfun_frame_layout.first_save_gpr,
10722 cfun_frame_layout.last_save_gpr);
10723 emit_insn (insn);
10726 /* Dummy insn to mark literal pool slot. */
10728 if (cfun->machine->base_reg)
10729 emit_insn (gen_main_pool (cfun->machine->base_reg));
10731 offset = cfun_frame_layout.f0_offset;
10733 /* Save f0 and f2. */
10734 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10736 if (cfun_fpr_save_p (i))
10738 save_fpr (stack_pointer_rtx, offset, i);
10739 offset += 8;
10741 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10742 offset += 8;
10745 /* Save f4 and f6. */
10746 offset = cfun_frame_layout.f4_offset;
10747 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10749 if (cfun_fpr_save_p (i))
10751 insn = save_fpr (stack_pointer_rtx, offset, i);
10752 offset += 8;
10754 /* If f4 and f6 are call clobbered they are saved due to
10755 stdargs and therefore are not frame related. */
10756 if (!call_really_used_regs[i])
10757 RTX_FRAME_RELATED_P (insn) = 1;
10759 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10760 offset += 8;
10763 if (TARGET_PACKED_STACK
10764 && cfun_save_high_fprs_p
10765 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10767 offset = (cfun_frame_layout.f8_offset
10768 + (cfun_frame_layout.high_fprs - 1) * 8);
10770 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10771 if (cfun_fpr_save_p (i))
10773 insn = save_fpr (stack_pointer_rtx, offset, i);
10775 RTX_FRAME_RELATED_P (insn) = 1;
10776 offset -= 8;
10778 if (offset >= cfun_frame_layout.f8_offset)
10779 next_fpr = i;
10782 if (!TARGET_PACKED_STACK)
10783 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10785 if (flag_stack_usage_info)
10786 current_function_static_stack_size = cfun_frame_layout.frame_size;
10788 /* Decrement stack pointer. */
10790 if (cfun_frame_layout.frame_size > 0)
10792 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10793 rtx real_frame_off;
10795 if (s390_stack_size)
10797 HOST_WIDE_INT stack_guard;
10799 if (s390_stack_guard)
10800 stack_guard = s390_stack_guard;
10801 else
10803 /* If no value for stack guard is provided the smallest power of 2
10804 larger than the current frame size is chosen. */
10805 stack_guard = 1;
10806 while (stack_guard < cfun_frame_layout.frame_size)
10807 stack_guard <<= 1;
10810 if (cfun_frame_layout.frame_size >= s390_stack_size)
10812 warning (0, "frame size of function %qs is %wd"
10813 " bytes exceeding user provided stack limit of "
10814 "%d bytes. "
10815 "An unconditional trap is added.",
10816 current_function_name(), cfun_frame_layout.frame_size,
10817 s390_stack_size);
10818 emit_insn (gen_trap ());
10819 emit_barrier ();
10821 else
10823 /* stack_guard has to be smaller than s390_stack_size.
10824 Otherwise we would emit an AND with zero which would
10825 not match the test under mask pattern. */
10826 if (stack_guard >= s390_stack_size)
10828 warning (0, "frame size of function %qs is %wd"
10829 " bytes which is more than half the stack size. "
10830 "The dynamic check would not be reliable. "
10831 "No check emitted for this function.",
10832 current_function_name(),
10833 cfun_frame_layout.frame_size);
10835 else
10837 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10838 & ~(stack_guard - 1));
10840 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10841 GEN_INT (stack_check_mask));
10842 if (TARGET_64BIT)
10843 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10844 t, const0_rtx),
10845 t, const0_rtx, const0_rtx));
10846 else
10847 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10848 t, const0_rtx),
10849 t, const0_rtx, const0_rtx));
10854 if (s390_warn_framesize > 0
10855 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10856 warning (0, "frame size of %qs is %wd bytes",
10857 current_function_name (), cfun_frame_layout.frame_size);
10859 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10860 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10862 /* Save incoming stack pointer into temp reg. */
10863 if (TARGET_BACKCHAIN || next_fpr)
10864 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10866 /* Subtract frame size from stack pointer. */
10868 if (DISP_IN_RANGE (INTVAL (frame_off)))
10870 insn = gen_rtx_SET (stack_pointer_rtx,
10871 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10872 frame_off));
10873 insn = emit_insn (insn);
10875 else
10877 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10878 frame_off = force_const_mem (Pmode, frame_off);
10880 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10881 annotate_constant_pool_refs (&PATTERN (insn));
10884 RTX_FRAME_RELATED_P (insn) = 1;
10885 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10886 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10887 gen_rtx_SET (stack_pointer_rtx,
10888 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10889 real_frame_off)));
10891 /* Set backchain. */
10893 if (TARGET_BACKCHAIN)
10895 if (cfun_frame_layout.backchain_offset)
10896 addr = gen_rtx_MEM (Pmode,
10897 plus_constant (Pmode, stack_pointer_rtx,
10898 cfun_frame_layout.backchain_offset));
10899 else
10900 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10901 set_mem_alias_set (addr, get_frame_alias_set ());
10902 insn = emit_insn (gen_move_insn (addr, temp_reg));
10905 /* If we support non-call exceptions (e.g. for Java),
10906 we need to make sure the backchain pointer is set up
10907 before any possibly trapping memory access. */
10908 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10910 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10911 emit_clobber (addr);
10915 /* Save fprs 8 - 15 (64 bit ABI). */
10917 if (cfun_save_high_fprs_p && next_fpr)
10919 /* If the stack might be accessed through a different register
10920 we have to make sure that the stack pointer decrement is not
10921 moved below the use of the stack slots. */
10922 s390_emit_stack_tie ();
10924 insn = emit_insn (gen_add2_insn (temp_reg,
10925 GEN_INT (cfun_frame_layout.f8_offset)));
10927 offset = 0;
10929 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10930 if (cfun_fpr_save_p (i))
10932 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10933 cfun_frame_layout.frame_size
10934 + cfun_frame_layout.f8_offset
10935 + offset);
10937 insn = save_fpr (temp_reg, offset, i);
10938 offset += 8;
10939 RTX_FRAME_RELATED_P (insn) = 1;
10940 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10941 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10942 gen_rtx_REG (DFmode, i)));
10946 /* Set frame pointer, if needed. */
10948 if (frame_pointer_needed)
10950 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10951 RTX_FRAME_RELATED_P (insn) = 1;
10954 /* Set up got pointer, if needed. */
10956 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10958 rtx_insn *insns = s390_load_got ();
10960 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10961 annotate_constant_pool_refs (&PATTERN (insn));
10963 emit_insn (insns);
10966 if (TARGET_TPF_PROFILING)
10968 /* Generate a BAS instruction to serve as a function
10969 entry intercept to facilitate the use of tracing
10970 algorithms located at the branch target. */
10971 emit_insn (gen_prologue_tpf ());
10973 /* Emit a blockage here so that all code
10974 lies between the profiling mechanisms. */
10975 emit_insn (gen_blockage ());
10979 /* Expand the epilogue into a bunch of separate insns. */
10981 void
10982 s390_emit_epilogue (bool sibcall)
10984 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10985 int area_bottom, area_top, offset = 0;
10986 int next_offset;
10987 rtvec p;
10988 int i;
10990 if (TARGET_TPF_PROFILING)
10993 /* Generate a BAS instruction to serve as a function
10994 entry intercept to facilitate the use of tracing
10995 algorithms located at the branch target. */
10997 /* Emit a blockage here so that all code
10998 lies between the profiling mechanisms. */
10999 emit_insn (gen_blockage ());
11001 emit_insn (gen_epilogue_tpf ());
11004 /* Check whether to use frame or stack pointer for restore. */
11006 frame_pointer = (frame_pointer_needed
11007 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11009 s390_frame_area (&area_bottom, &area_top);
11011 /* Check whether we can access the register save area.
11012 If not, increment the frame pointer as required. */
11014 if (area_top <= area_bottom)
11016 /* Nothing to restore. */
11018 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11019 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11021 /* Area is in range. */
11022 offset = cfun_frame_layout.frame_size;
11024 else
11026 rtx insn, frame_off, cfa;
11028 offset = area_bottom < 0 ? -area_bottom : 0;
11029 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11031 cfa = gen_rtx_SET (frame_pointer,
11032 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11033 if (DISP_IN_RANGE (INTVAL (frame_off)))
11035 insn = gen_rtx_SET (frame_pointer,
11036 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11037 insn = emit_insn (insn);
11039 else
11041 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11042 frame_off = force_const_mem (Pmode, frame_off);
11044 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11045 annotate_constant_pool_refs (&PATTERN (insn));
11047 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11048 RTX_FRAME_RELATED_P (insn) = 1;
11051 /* Restore call saved fprs. */
11053 if (TARGET_64BIT)
11055 if (cfun_save_high_fprs_p)
11057 next_offset = cfun_frame_layout.f8_offset;
11058 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11060 if (cfun_fpr_save_p (i))
11062 restore_fpr (frame_pointer,
11063 offset + next_offset, i);
11064 cfa_restores
11065 = alloc_reg_note (REG_CFA_RESTORE,
11066 gen_rtx_REG (DFmode, i), cfa_restores);
11067 next_offset += 8;
11073 else
11075 next_offset = cfun_frame_layout.f4_offset;
11076 /* f4, f6 */
11077 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11079 if (cfun_fpr_save_p (i))
11081 restore_fpr (frame_pointer,
11082 offset + next_offset, i);
11083 cfa_restores
11084 = alloc_reg_note (REG_CFA_RESTORE,
11085 gen_rtx_REG (DFmode, i), cfa_restores);
11086 next_offset += 8;
11088 else if (!TARGET_PACKED_STACK)
11089 next_offset += 8;
11094 /* Return register. */
11096 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11098 /* Restore call saved gprs. */
11100 if (cfun_frame_layout.first_restore_gpr != -1)
11102 rtx insn, addr;
11103 int i;
11105 /* Check for global register and save them
11106 to stack location from where they get restored. */
11108 for (i = cfun_frame_layout.first_restore_gpr;
11109 i <= cfun_frame_layout.last_restore_gpr;
11110 i++)
11112 if (global_not_special_regno_p (i))
11114 addr = plus_constant (Pmode, frame_pointer,
11115 offset + cfun_frame_layout.gprs_offset
11116 + (i - cfun_frame_layout.first_save_gpr_slot)
11117 * UNITS_PER_LONG);
11118 addr = gen_rtx_MEM (Pmode, addr);
11119 set_mem_alias_set (addr, get_frame_alias_set ());
11120 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11122 else
11123 cfa_restores
11124 = alloc_reg_note (REG_CFA_RESTORE,
11125 gen_rtx_REG (Pmode, i), cfa_restores);
11128 if (! sibcall)
11130 /* Fetch return address from stack before load multiple,
11131 this will do good for scheduling.
11133 Only do this if we already decided that r14 needs to be
11134 saved to a stack slot. (And not just because r14 happens to
11135 be in between two GPRs which need saving.) Otherwise it
11136 would be difficult to take that decision back in
11137 s390_optimize_prologue. */
11138 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
11140 int return_regnum = find_unused_clobbered_reg();
11141 if (!return_regnum)
11142 return_regnum = 4;
11143 return_reg = gen_rtx_REG (Pmode, return_regnum);
11145 addr = plus_constant (Pmode, frame_pointer,
11146 offset + cfun_frame_layout.gprs_offset
11147 + (RETURN_REGNUM
11148 - cfun_frame_layout.first_save_gpr_slot)
11149 * UNITS_PER_LONG);
11150 addr = gen_rtx_MEM (Pmode, addr);
11151 set_mem_alias_set (addr, get_frame_alias_set ());
11152 emit_move_insn (return_reg, addr);
11154 /* Once we did that optimization we have to make sure
11155 s390_optimize_prologue does not try to remove the
11156 store of r14 since we will not be able to find the
11157 load issued here. */
11158 cfun_frame_layout.save_return_addr_p = true;
11162 insn = restore_gprs (frame_pointer,
11163 offset + cfun_frame_layout.gprs_offset
11164 + (cfun_frame_layout.first_restore_gpr
11165 - cfun_frame_layout.first_save_gpr_slot)
11166 * UNITS_PER_LONG,
11167 cfun_frame_layout.first_restore_gpr,
11168 cfun_frame_layout.last_restore_gpr);
11169 insn = emit_insn (insn);
11170 REG_NOTES (insn) = cfa_restores;
11171 add_reg_note (insn, REG_CFA_DEF_CFA,
11172 plus_constant (Pmode, stack_pointer_rtx,
11173 STACK_POINTER_OFFSET));
11174 RTX_FRAME_RELATED_P (insn) = 1;
11177 s390_restore_gprs_from_fprs ();
11179 if (! sibcall)
11182 /* Return to caller. */
11184 p = rtvec_alloc (2);
11186 RTVEC_ELT (p, 0) = ret_rtx;
11187 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11188 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11192 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11194 static void
11195 s300_set_up_by_prologue (hard_reg_set_container *regs)
11197 if (cfun->machine->base_reg
11198 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11199 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11202 /* -fsplit-stack support. */
11204 /* A SYMBOL_REF for __morestack. */
11205 static GTY(()) rtx morestack_ref;
11207 /* When using -fsplit-stack, the allocation routines set a field in
11208 the TCB to the bottom of the stack plus this much space, measured
11209 in bytes. */
11211 #define SPLIT_STACK_AVAILABLE 1024
11213 /* Emit -fsplit-stack prologue, which goes before the regular function
11214 prologue. */
11216 void
11217 s390_expand_split_stack_prologue (void)
11219 rtx r1, guard, cc = NULL;
11220 rtx_insn *insn;
11221 /* Offset from thread pointer to __private_ss. */
11222 int psso = TARGET_64BIT ? 0x38 : 0x20;
11223 /* Pointer size in bytes. */
11224 /* Frame size and argument size - the two parameters to __morestack. */
11225 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11226 /* Align argument size to 8 bytes - simplifies __morestack code. */
11227 HOST_WIDE_INT args_size = crtl->args.size >= 0
11228 ? ((crtl->args.size + 7) & ~7)
11229 : 0;
11230 /* Label to be called by __morestack. */
11231 rtx_code_label *call_done = NULL;
11232 rtx_code_label *parm_base = NULL;
11233 rtx tmp;
11235 gcc_assert (flag_split_stack && reload_completed);
11236 if (!TARGET_CPU_ZARCH)
11238 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11239 return;
11242 r1 = gen_rtx_REG (Pmode, 1);
11244 /* If no stack frame will be allocated, don't do anything. */
11245 if (!frame_size)
11247 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11249 /* If va_start is used, just use r15. */
11250 emit_move_insn (r1,
11251 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11252 GEN_INT (STACK_POINTER_OFFSET)));
11255 return;
11258 if (morestack_ref == NULL_RTX)
11260 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11261 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11262 | SYMBOL_FLAG_FUNCTION);
11265 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11267 /* If frame_size will fit in an add instruction, do a stack space
11268 check, and only call __morestack if there's not enough space. */
11270 /* Get thread pointer. r1 is the only register we can always destroy - r0
11271 could contain a static chain (and cannot be used to address memory
11272 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11273 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11274 /* Aim at __private_ss. */
11275 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11277 /* If less that 1kiB used, skip addition and compare directly with
11278 __private_ss. */
11279 if (frame_size > SPLIT_STACK_AVAILABLE)
11281 emit_move_insn (r1, guard);
11282 if (TARGET_64BIT)
11283 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11284 else
11285 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11286 guard = r1;
11289 /* Compare the (maybe adjusted) guard with the stack pointer. */
11290 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11293 call_done = gen_label_rtx ();
11294 parm_base = gen_label_rtx ();
11296 /* Emit the parameter block. */
11297 tmp = gen_split_stack_data (parm_base, call_done,
11298 GEN_INT (frame_size),
11299 GEN_INT (args_size));
11300 insn = emit_insn (tmp);
11301 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11302 LABEL_NUSES (call_done)++;
11303 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11304 LABEL_NUSES (parm_base)++;
11306 /* %r1 = litbase. */
11307 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11308 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11309 LABEL_NUSES (parm_base)++;
11311 /* Now, we need to call __morestack. It has very special calling
11312 conventions: it preserves param/return/static chain registers for
11313 calling main function body, and looks for its own parameters at %r1. */
11315 if (cc != NULL)
11317 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11319 insn = emit_jump_insn (tmp);
11320 JUMP_LABEL (insn) = call_done;
11321 LABEL_NUSES (call_done)++;
11323 /* Mark the jump as very unlikely to be taken. */
11324 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11326 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11328 /* If va_start is used, and __morestack was not called, just use
11329 r15. */
11330 emit_move_insn (r1,
11331 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11332 GEN_INT (STACK_POINTER_OFFSET)));
11335 else
11337 tmp = gen_split_stack_call (morestack_ref, call_done);
11338 insn = emit_jump_insn (tmp);
11339 JUMP_LABEL (insn) = call_done;
11340 LABEL_NUSES (call_done)++;
11341 emit_barrier ();
11344 /* __morestack will call us here. */
11346 emit_label (call_done);
11349 /* We may have to tell the dataflow pass that the split stack prologue
11350 is initializing a register. */
11352 static void
11353 s390_live_on_entry (bitmap regs)
11355 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11357 gcc_assert (flag_split_stack);
11358 bitmap_set_bit (regs, 1);
11362 /* Return true if the function can use simple_return to return outside
11363 of a shrink-wrapped region. At present shrink-wrapping is supported
11364 in all cases. */
11366 bool
11367 s390_can_use_simple_return_insn (void)
11369 return true;
11372 /* Return true if the epilogue is guaranteed to contain only a return
11373 instruction and if a direct return can therefore be used instead.
11374 One of the main advantages of using direct return instructions
11375 is that we can then use conditional returns. */
11377 bool
11378 s390_can_use_return_insn (void)
11380 int i;
11382 if (!reload_completed)
11383 return false;
11385 if (crtl->profile)
11386 return false;
11388 if (TARGET_TPF_PROFILING)
11389 return false;
11391 for (i = 0; i < 16; i++)
11392 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11393 return false;
11395 /* For 31 bit this is not covered by the frame_size check below
11396 since f4, f6 are saved in the register save area without needing
11397 additional stack space. */
11398 if (!TARGET_64BIT
11399 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11400 return false;
11402 if (cfun->machine->base_reg
11403 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11404 return false;
11406 return cfun_frame_layout.frame_size == 0;
11409 /* The VX ABI differs for vararg functions. Therefore we need the
11410 prototype of the callee to be available when passing vector type
11411 values. */
11412 static const char *
11413 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11415 return ((TARGET_VX_ABI
11416 && typelist == 0
11417 && VECTOR_TYPE_P (TREE_TYPE (val))
11418 && (funcdecl == NULL_TREE
11419 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11420 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11421 ? N_("Vector argument passed to unprototyped function")
11422 : NULL);
11426 /* Return the size in bytes of a function argument of
11427 type TYPE and/or mode MODE. At least one of TYPE or
11428 MODE must be specified. */
11430 static int
11431 s390_function_arg_size (machine_mode mode, const_tree type)
11433 if (type)
11434 return int_size_in_bytes (type);
11436 /* No type info available for some library calls ... */
11437 if (mode != BLKmode)
11438 return GET_MODE_SIZE (mode);
11440 /* If we have neither type nor mode, abort */
11441 gcc_unreachable ();
11444 /* Return true if a function argument of type TYPE and mode MODE
11445 is to be passed in a vector register, if available. */
11447 bool
11448 s390_function_arg_vector (machine_mode mode, const_tree type)
11450 if (!TARGET_VX_ABI)
11451 return false;
11453 if (s390_function_arg_size (mode, type) > 16)
11454 return false;
11456 /* No type info available for some library calls ... */
11457 if (!type)
11458 return VECTOR_MODE_P (mode);
11460 /* The ABI says that record types with a single member are treated
11461 just like that member would be. */
11462 while (TREE_CODE (type) == RECORD_TYPE)
11464 tree field, single = NULL_TREE;
11466 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11468 if (TREE_CODE (field) != FIELD_DECL)
11469 continue;
11471 if (single == NULL_TREE)
11472 single = TREE_TYPE (field);
11473 else
11474 return false;
11477 if (single == NULL_TREE)
11478 return false;
11479 else
11481 /* If the field declaration adds extra byte due to
11482 e.g. padding this is not accepted as vector type. */
11483 if (int_size_in_bytes (single) <= 0
11484 || int_size_in_bytes (single) != int_size_in_bytes (type))
11485 return false;
11486 type = single;
11490 return VECTOR_TYPE_P (type);
11493 /* Return true if a function argument of type TYPE and mode MODE
11494 is to be passed in a floating-point register, if available. */
11496 static bool
11497 s390_function_arg_float (machine_mode mode, const_tree type)
11499 if (s390_function_arg_size (mode, type) > 8)
11500 return false;
11502 /* Soft-float changes the ABI: no floating-point registers are used. */
11503 if (TARGET_SOFT_FLOAT)
11504 return false;
11506 /* No type info available for some library calls ... */
11507 if (!type)
11508 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11510 /* The ABI says that record types with a single member are treated
11511 just like that member would be. */
11512 while (TREE_CODE (type) == RECORD_TYPE)
11514 tree field, single = NULL_TREE;
11516 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11518 if (TREE_CODE (field) != FIELD_DECL)
11519 continue;
11521 if (single == NULL_TREE)
11522 single = TREE_TYPE (field);
11523 else
11524 return false;
11527 if (single == NULL_TREE)
11528 return false;
11529 else
11530 type = single;
11533 return TREE_CODE (type) == REAL_TYPE;
11536 /* Return true if a function argument of type TYPE and mode MODE
11537 is to be passed in an integer register, or a pair of integer
11538 registers, if available. */
11540 static bool
11541 s390_function_arg_integer (machine_mode mode, const_tree type)
11543 int size = s390_function_arg_size (mode, type);
11544 if (size > 8)
11545 return false;
11547 /* No type info available for some library calls ... */
11548 if (!type)
11549 return GET_MODE_CLASS (mode) == MODE_INT
11550 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11552 /* We accept small integral (and similar) types. */
11553 if (INTEGRAL_TYPE_P (type)
11554 || POINTER_TYPE_P (type)
11555 || TREE_CODE (type) == NULLPTR_TYPE
11556 || TREE_CODE (type) == OFFSET_TYPE
11557 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11558 return true;
11560 /* We also accept structs of size 1, 2, 4, 8 that are not
11561 passed in floating-point registers. */
11562 if (AGGREGATE_TYPE_P (type)
11563 && exact_log2 (size) >= 0
11564 && !s390_function_arg_float (mode, type))
11565 return true;
11567 return false;
11570 /* Return 1 if a function argument of type TYPE and mode MODE
11571 is to be passed by reference. The ABI specifies that only
11572 structures of size 1, 2, 4, or 8 bytes are passed by value,
11573 all other structures (and complex numbers) are passed by
11574 reference. */
11576 static bool
11577 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11578 machine_mode mode, const_tree type,
11579 bool named ATTRIBUTE_UNUSED)
11581 int size = s390_function_arg_size (mode, type);
11583 if (s390_function_arg_vector (mode, type))
11584 return false;
11586 if (size > 8)
11587 return true;
11589 if (type)
11591 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11592 return true;
11594 if (TREE_CODE (type) == COMPLEX_TYPE
11595 || TREE_CODE (type) == VECTOR_TYPE)
11596 return true;
11599 return false;
11602 /* Update the data in CUM to advance over an argument of mode MODE and
11603 data type TYPE. (TYPE is null for libcalls where that information
11604 may not be available.). The boolean NAMED specifies whether the
11605 argument is a named argument (as opposed to an unnamed argument
11606 matching an ellipsis). */
11608 static void
11609 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11610 const_tree type, bool named)
11612 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11614 if (s390_function_arg_vector (mode, type))
11616 /* We are called for unnamed vector stdarg arguments which are
11617 passed on the stack. In this case this hook does not have to
11618 do anything since stack arguments are tracked by common
11619 code. */
11620 if (!named)
11621 return;
11622 cum->vrs += 1;
11624 else if (s390_function_arg_float (mode, type))
11626 cum->fprs += 1;
11628 else if (s390_function_arg_integer (mode, type))
11630 int size = s390_function_arg_size (mode, type);
11631 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11633 else
11634 gcc_unreachable ();
11637 /* Define where to put the arguments to a function.
11638 Value is zero to push the argument on the stack,
11639 or a hard register in which to store the argument.
11641 MODE is the argument's machine mode.
11642 TYPE is the data type of the argument (as a tree).
11643 This is null for libcalls where that information may
11644 not be available.
11645 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11646 the preceding args and about the function being called.
11647 NAMED is nonzero if this argument is a named parameter
11648 (otherwise it is an extra parameter matching an ellipsis).
11650 On S/390, we use general purpose registers 2 through 6 to
11651 pass integer, pointer, and certain structure arguments, and
11652 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11653 to pass floating point arguments. All remaining arguments
11654 are pushed to the stack. */
11656 static rtx
11657 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11658 const_tree type, bool named)
11660 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11662 if (!named)
11663 s390_check_type_for_vector_abi (type, true, false);
11665 if (s390_function_arg_vector (mode, type))
11667 /* Vector arguments being part of the ellipsis are passed on the
11668 stack. */
11669 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11670 return NULL_RTX;
11672 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11674 else if (s390_function_arg_float (mode, type))
11676 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11677 return NULL_RTX;
11678 else
11679 return gen_rtx_REG (mode, cum->fprs + 16);
11681 else if (s390_function_arg_integer (mode, type))
11683 int size = s390_function_arg_size (mode, type);
11684 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11686 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11687 return NULL_RTX;
11688 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11689 return gen_rtx_REG (mode, cum->gprs + 2);
11690 else if (n_gprs == 2)
11692 rtvec p = rtvec_alloc (2);
11694 RTVEC_ELT (p, 0)
11695 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11696 const0_rtx);
11697 RTVEC_ELT (p, 1)
11698 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11699 GEN_INT (4));
11701 return gen_rtx_PARALLEL (mode, p);
11705 /* After the real arguments, expand_call calls us once again
11706 with a void_type_node type. Whatever we return here is
11707 passed as operand 2 to the call expanders.
11709 We don't need this feature ... */
11710 else if (type == void_type_node)
11711 return const0_rtx;
11713 gcc_unreachable ();
11716 /* Return true if return values of type TYPE should be returned
11717 in a memory buffer whose address is passed by the caller as
11718 hidden first argument. */
11720 static bool
11721 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11723 /* We accept small integral (and similar) types. */
11724 if (INTEGRAL_TYPE_P (type)
11725 || POINTER_TYPE_P (type)
11726 || TREE_CODE (type) == OFFSET_TYPE
11727 || TREE_CODE (type) == REAL_TYPE)
11728 return int_size_in_bytes (type) > 8;
11730 /* vector types which fit into a VR. */
11731 if (TARGET_VX_ABI
11732 && VECTOR_TYPE_P (type)
11733 && int_size_in_bytes (type) <= 16)
11734 return false;
11736 /* Aggregates and similar constructs are always returned
11737 in memory. */
11738 if (AGGREGATE_TYPE_P (type)
11739 || TREE_CODE (type) == COMPLEX_TYPE
11740 || VECTOR_TYPE_P (type))
11741 return true;
11743 /* ??? We get called on all sorts of random stuff from
11744 aggregate_value_p. We can't abort, but it's not clear
11745 what's safe to return. Pretend it's a struct I guess. */
11746 return true;
11749 /* Function arguments and return values are promoted to word size. */
11751 static machine_mode
11752 s390_promote_function_mode (const_tree type, machine_mode mode,
11753 int *punsignedp,
11754 const_tree fntype ATTRIBUTE_UNUSED,
11755 int for_return ATTRIBUTE_UNUSED)
11757 if (INTEGRAL_MODE_P (mode)
11758 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11760 if (type != NULL_TREE && POINTER_TYPE_P (type))
11761 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11762 return Pmode;
11765 return mode;
11768 /* Define where to return a (scalar) value of type RET_TYPE.
11769 If RET_TYPE is null, define where to return a (scalar)
11770 value of mode MODE from a libcall. */
11772 static rtx
11773 s390_function_and_libcall_value (machine_mode mode,
11774 const_tree ret_type,
11775 const_tree fntype_or_decl,
11776 bool outgoing ATTRIBUTE_UNUSED)
11778 /* For vector return types it is important to use the RET_TYPE
11779 argument whenever available since the middle-end might have
11780 changed the mode to a scalar mode. */
11781 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11782 || (!ret_type && VECTOR_MODE_P (mode)));
11784 /* For normal functions perform the promotion as
11785 promote_function_mode would do. */
11786 if (ret_type)
11788 int unsignedp = TYPE_UNSIGNED (ret_type);
11789 mode = promote_function_mode (ret_type, mode, &unsignedp,
11790 fntype_or_decl, 1);
11793 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11794 || SCALAR_FLOAT_MODE_P (mode)
11795 || (TARGET_VX_ABI && vector_ret_type_p));
11796 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11798 if (TARGET_VX_ABI && vector_ret_type_p)
11799 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11800 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11801 return gen_rtx_REG (mode, 16);
11802 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11803 || UNITS_PER_LONG == UNITS_PER_WORD)
11804 return gen_rtx_REG (mode, 2);
11805 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11807 /* This case is triggered when returning a 64 bit value with
11808 -m31 -mzarch. Although the value would fit into a single
11809 register it has to be forced into a 32 bit register pair in
11810 order to match the ABI. */
11811 rtvec p = rtvec_alloc (2);
11813 RTVEC_ELT (p, 0)
11814 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11815 RTVEC_ELT (p, 1)
11816 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11818 return gen_rtx_PARALLEL (mode, p);
11821 gcc_unreachable ();
11824 /* Define where to return a scalar return value of type RET_TYPE. */
11826 static rtx
11827 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11828 bool outgoing)
11830 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11831 fn_decl_or_type, outgoing);
11834 /* Define where to return a scalar libcall return value of mode
11835 MODE. */
11837 static rtx
11838 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11840 return s390_function_and_libcall_value (mode, NULL_TREE,
11841 NULL_TREE, true);
11845 /* Create and return the va_list datatype.
11847 On S/390, va_list is an array type equivalent to
11849 typedef struct __va_list_tag
11851 long __gpr;
11852 long __fpr;
11853 void *__overflow_arg_area;
11854 void *__reg_save_area;
11855 } va_list[1];
11857 where __gpr and __fpr hold the number of general purpose
11858 or floating point arguments used up to now, respectively,
11859 __overflow_arg_area points to the stack location of the
11860 next argument passed on the stack, and __reg_save_area
11861 always points to the start of the register area in the
11862 call frame of the current function. The function prologue
11863 saves all registers used for argument passing into this
11864 area if the function uses variable arguments. */
11866 static tree
11867 s390_build_builtin_va_list (void)
11869 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11871 record = lang_hooks.types.make_type (RECORD_TYPE);
11873 type_decl =
11874 build_decl (BUILTINS_LOCATION,
11875 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11877 f_gpr = build_decl (BUILTINS_LOCATION,
11878 FIELD_DECL, get_identifier ("__gpr"),
11879 long_integer_type_node);
11880 f_fpr = build_decl (BUILTINS_LOCATION,
11881 FIELD_DECL, get_identifier ("__fpr"),
11882 long_integer_type_node);
11883 f_ovf = build_decl (BUILTINS_LOCATION,
11884 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11885 ptr_type_node);
11886 f_sav = build_decl (BUILTINS_LOCATION,
11887 FIELD_DECL, get_identifier ("__reg_save_area"),
11888 ptr_type_node);
11890 va_list_gpr_counter_field = f_gpr;
11891 va_list_fpr_counter_field = f_fpr;
11893 DECL_FIELD_CONTEXT (f_gpr) = record;
11894 DECL_FIELD_CONTEXT (f_fpr) = record;
11895 DECL_FIELD_CONTEXT (f_ovf) = record;
11896 DECL_FIELD_CONTEXT (f_sav) = record;
11898 TYPE_STUB_DECL (record) = type_decl;
11899 TYPE_NAME (record) = type_decl;
11900 TYPE_FIELDS (record) = f_gpr;
11901 DECL_CHAIN (f_gpr) = f_fpr;
11902 DECL_CHAIN (f_fpr) = f_ovf;
11903 DECL_CHAIN (f_ovf) = f_sav;
11905 layout_type (record);
11907 /* The correct type is an array type of one element. */
11908 return build_array_type (record, build_index_type (size_zero_node));
11911 /* Implement va_start by filling the va_list structure VALIST.
11912 STDARG_P is always true, and ignored.
11913 NEXTARG points to the first anonymous stack argument.
11915 The following global variables are used to initialize
11916 the va_list structure:
11918 crtl->args.info:
11919 holds number of gprs and fprs used for named arguments.
11920 crtl->args.arg_offset_rtx:
11921 holds the offset of the first anonymous stack argument
11922 (relative to the virtual arg pointer). */
11924 static void
11925 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11927 HOST_WIDE_INT n_gpr, n_fpr;
11928 int off;
11929 tree f_gpr, f_fpr, f_ovf, f_sav;
11930 tree gpr, fpr, ovf, sav, t;
11932 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11933 f_fpr = DECL_CHAIN (f_gpr);
11934 f_ovf = DECL_CHAIN (f_fpr);
11935 f_sav = DECL_CHAIN (f_ovf);
11937 valist = build_simple_mem_ref (valist);
11938 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11939 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11940 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11941 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11943 /* Count number of gp and fp argument registers used. */
11945 n_gpr = crtl->args.info.gprs;
11946 n_fpr = crtl->args.info.fprs;
11948 if (cfun->va_list_gpr_size)
11950 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11951 build_int_cst (NULL_TREE, n_gpr));
11952 TREE_SIDE_EFFECTS (t) = 1;
11953 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11956 if (cfun->va_list_fpr_size)
11958 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11959 build_int_cst (NULL_TREE, n_fpr));
11960 TREE_SIDE_EFFECTS (t) = 1;
11961 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11964 if (flag_split_stack
11965 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
11966 == NULL)
11967 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11969 rtx reg;
11970 rtx_insn *seq;
11972 reg = gen_reg_rtx (Pmode);
11973 cfun->machine->split_stack_varargs_pointer = reg;
11975 start_sequence ();
11976 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
11977 seq = get_insns ();
11978 end_sequence ();
11980 push_topmost_sequence ();
11981 emit_insn_after (seq, entry_of_function ());
11982 pop_topmost_sequence ();
11985 /* Find the overflow area.
11986 FIXME: This currently is too pessimistic when the vector ABI is
11987 enabled. In that case we *always* set up the overflow area
11988 pointer. */
11989 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11990 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11991 || TARGET_VX_ABI)
11993 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11994 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11995 else
11996 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
11998 off = INTVAL (crtl->args.arg_offset_rtx);
11999 off = off < 0 ? 0 : off;
12000 if (TARGET_DEBUG_ARG)
12001 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12002 (int)n_gpr, (int)n_fpr, off);
12004 t = fold_build_pointer_plus_hwi (t, off);
12006 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12007 TREE_SIDE_EFFECTS (t) = 1;
12008 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12011 /* Find the register save area. */
12012 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12013 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12015 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12016 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12018 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12019 TREE_SIDE_EFFECTS (t) = 1;
12020 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12024 /* Implement va_arg by updating the va_list structure
12025 VALIST as required to retrieve an argument of type
12026 TYPE, and returning that argument.
12028 Generates code equivalent to:
12030 if (integral value) {
12031 if (size <= 4 && args.gpr < 5 ||
12032 size > 4 && args.gpr < 4 )
12033 ret = args.reg_save_area[args.gpr+8]
12034 else
12035 ret = *args.overflow_arg_area++;
12036 } else if (vector value) {
12037 ret = *args.overflow_arg_area;
12038 args.overflow_arg_area += size / 8;
12039 } else if (float value) {
12040 if (args.fgpr < 2)
12041 ret = args.reg_save_area[args.fpr+64]
12042 else
12043 ret = *args.overflow_arg_area++;
12044 } else if (aggregate value) {
12045 if (args.gpr < 5)
12046 ret = *args.reg_save_area[args.gpr]
12047 else
12048 ret = **args.overflow_arg_area++;
12049 } */
12051 static tree
12052 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12053 gimple_seq *post_p ATTRIBUTE_UNUSED)
12055 tree f_gpr, f_fpr, f_ovf, f_sav;
12056 tree gpr, fpr, ovf, sav, reg, t, u;
12057 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12058 tree lab_false, lab_over;
12059 tree addr = create_tmp_var (ptr_type_node, "addr");
12060 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12061 a stack slot. */
12063 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12064 f_fpr = DECL_CHAIN (f_gpr);
12065 f_ovf = DECL_CHAIN (f_fpr);
12066 f_sav = DECL_CHAIN (f_ovf);
12068 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12069 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12070 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12072 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12073 both appear on a lhs. */
12074 valist = unshare_expr (valist);
12075 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12077 size = int_size_in_bytes (type);
12079 s390_check_type_for_vector_abi (type, true, false);
12081 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12083 if (TARGET_DEBUG_ARG)
12085 fprintf (stderr, "va_arg: aggregate type");
12086 debug_tree (type);
12089 /* Aggregates are passed by reference. */
12090 indirect_p = 1;
12091 reg = gpr;
12092 n_reg = 1;
12094 /* kernel stack layout on 31 bit: It is assumed here that no padding
12095 will be added by s390_frame_info because for va_args always an even
12096 number of gprs has to be saved r15-r2 = 14 regs. */
12097 sav_ofs = 2 * UNITS_PER_LONG;
12098 sav_scale = UNITS_PER_LONG;
12099 size = UNITS_PER_LONG;
12100 max_reg = GP_ARG_NUM_REG - n_reg;
12101 left_align_p = false;
12103 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12105 if (TARGET_DEBUG_ARG)
12107 fprintf (stderr, "va_arg: vector type");
12108 debug_tree (type);
12111 indirect_p = 0;
12112 reg = NULL_TREE;
12113 n_reg = 0;
12114 sav_ofs = 0;
12115 sav_scale = 8;
12116 max_reg = 0;
12117 left_align_p = true;
12119 else if (s390_function_arg_float (TYPE_MODE (type), type))
12121 if (TARGET_DEBUG_ARG)
12123 fprintf (stderr, "va_arg: float type");
12124 debug_tree (type);
12127 /* FP args go in FP registers, if present. */
12128 indirect_p = 0;
12129 reg = fpr;
12130 n_reg = 1;
12131 sav_ofs = 16 * UNITS_PER_LONG;
12132 sav_scale = 8;
12133 max_reg = FP_ARG_NUM_REG - n_reg;
12134 left_align_p = false;
12136 else
12138 if (TARGET_DEBUG_ARG)
12140 fprintf (stderr, "va_arg: other type");
12141 debug_tree (type);
12144 /* Otherwise into GP registers. */
12145 indirect_p = 0;
12146 reg = gpr;
12147 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12149 /* kernel stack layout on 31 bit: It is assumed here that no padding
12150 will be added by s390_frame_info because for va_args always an even
12151 number of gprs has to be saved r15-r2 = 14 regs. */
12152 sav_ofs = 2 * UNITS_PER_LONG;
12154 if (size < UNITS_PER_LONG)
12155 sav_ofs += UNITS_PER_LONG - size;
12157 sav_scale = UNITS_PER_LONG;
12158 max_reg = GP_ARG_NUM_REG - n_reg;
12159 left_align_p = false;
12162 /* Pull the value out of the saved registers ... */
12164 if (reg != NULL_TREE)
12167 if (reg > ((typeof (reg))max_reg))
12168 goto lab_false;
12170 addr = sav + sav_ofs + reg * save_scale;
12172 goto lab_over;
12174 lab_false:
12177 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12178 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12180 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12181 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12182 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12183 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12184 gimplify_and_add (t, pre_p);
12186 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12187 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12188 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12189 t = fold_build_pointer_plus (t, u);
12191 gimplify_assign (addr, t, pre_p);
12193 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12195 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12198 /* ... Otherwise out of the overflow area. */
12200 t = ovf;
12201 if (size < UNITS_PER_LONG && !left_align_p)
12202 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12204 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12206 gimplify_assign (addr, t, pre_p);
12208 if (size < UNITS_PER_LONG && left_align_p)
12209 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12210 else
12211 t = fold_build_pointer_plus_hwi (t, size);
12213 gimplify_assign (ovf, t, pre_p);
12215 if (reg != NULL_TREE)
12216 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12219 /* Increment register save count. */
12221 if (n_reg > 0)
12223 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12224 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12225 gimplify_and_add (u, pre_p);
12228 if (indirect_p)
12230 t = build_pointer_type_for_mode (build_pointer_type (type),
12231 ptr_mode, true);
12232 addr = fold_convert (t, addr);
12233 addr = build_va_arg_indirect_ref (addr);
12235 else
12237 t = build_pointer_type_for_mode (type, ptr_mode, true);
12238 addr = fold_convert (t, addr);
12241 return build_va_arg_indirect_ref (addr);
12244 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12245 expanders.
12246 DEST - Register location where CC will be stored.
12247 TDB - Pointer to a 256 byte area where to store the transaction.
12248 diagnostic block. NULL if TDB is not needed.
12249 RETRY - Retry count value. If non-NULL a retry loop for CC2
12250 is emitted
12251 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12252 of the tbegin instruction pattern. */
12254 void
12255 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12257 rtx retry_plus_two = gen_reg_rtx (SImode);
12258 rtx retry_reg = gen_reg_rtx (SImode);
12259 rtx_code_label *retry_label = NULL;
12261 if (retry != NULL_RTX)
12263 emit_move_insn (retry_reg, retry);
12264 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12265 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12266 retry_label = gen_label_rtx ();
12267 emit_label (retry_label);
12270 if (clobber_fprs_p)
12272 if (TARGET_VX)
12273 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12274 tdb));
12275 else
12276 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12277 tdb));
12279 else
12280 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12281 tdb));
12283 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12284 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12285 CC_REGNUM)),
12286 UNSPEC_CC_TO_INT));
12287 if (retry != NULL_RTX)
12289 const int CC0 = 1 << 3;
12290 const int CC1 = 1 << 2;
12291 const int CC3 = 1 << 0;
12292 rtx jump;
12293 rtx count = gen_reg_rtx (SImode);
12294 rtx_code_label *leave_label = gen_label_rtx ();
12296 /* Exit for success and permanent failures. */
12297 jump = s390_emit_jump (leave_label,
12298 gen_rtx_EQ (VOIDmode,
12299 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12300 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12301 LABEL_NUSES (leave_label) = 1;
12303 /* CC2 - transient failure. Perform retry with ppa. */
12304 emit_move_insn (count, retry_plus_two);
12305 emit_insn (gen_subsi3 (count, count, retry_reg));
12306 emit_insn (gen_tx_assist (count));
12307 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12308 retry_reg,
12309 retry_reg));
12310 JUMP_LABEL (jump) = retry_label;
12311 LABEL_NUSES (retry_label) = 1;
12312 emit_label (leave_label);
12317 /* Return the decl for the target specific builtin with the function
12318 code FCODE. */
12320 static tree
12321 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12323 if (fcode >= S390_BUILTIN_MAX)
12324 return error_mark_node;
12326 return s390_builtin_decls[fcode];
12329 /* We call mcount before the function prologue. So a profiled leaf
12330 function should stay a leaf function. */
12332 static bool
12333 s390_keep_leaf_when_profiled ()
12335 return true;
12338 /* Output assembly code for the trampoline template to
12339 stdio stream FILE.
12341 On S/390, we use gpr 1 internally in the trampoline code;
12342 gpr 0 is used to hold the static chain. */
12344 static void
12345 s390_asm_trampoline_template (FILE *file)
12347 rtx op[2];
12348 op[0] = gen_rtx_REG (Pmode, 0);
12349 op[1] = gen_rtx_REG (Pmode, 1);
12351 if (TARGET_64BIT)
12353 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12354 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12355 output_asm_insn ("br\t%1", op); /* 2 byte */
12356 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12358 else
12360 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12361 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12362 output_asm_insn ("br\t%1", op); /* 2 byte */
12363 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
12367 /* Emit RTL insns to initialize the variable parts of a trampoline.
12368 FNADDR is an RTX for the address of the function's pure code.
12369 CXT is an RTX for the static chain value for the function. */
12371 static void
12372 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12374 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12375 rtx mem;
12377 emit_block_move (m_tramp, assemble_trampoline_template (),
12378 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12380 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12381 emit_move_insn (mem, cxt);
12382 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12383 emit_move_insn (mem, fnaddr);
12386 /* Output assembler code to FILE to increment profiler label # LABELNO
12387 for profiling a function entry. */
12389 void
12390 s390_function_profiler (FILE *file, int labelno)
12392 rtx op[7];
12394 char label[128];
12395 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12397 fprintf (file, "# function profiler \n");
12399 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12400 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12401 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12403 op[2] = gen_rtx_REG (Pmode, 1);
12404 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12405 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12407 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12408 if (flag_pic)
12410 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12411 op[4] = gen_rtx_CONST (Pmode, op[4]);
12414 if (TARGET_64BIT)
12416 output_asm_insn ("stg\t%0,%1", op);
12417 output_asm_insn ("larl\t%2,%3", op);
12418 output_asm_insn ("brasl\t%0,%4", op);
12419 output_asm_insn ("lg\t%0,%1", op);
12421 else if (TARGET_CPU_ZARCH)
12423 output_asm_insn ("st\t%0,%1", op);
12424 output_asm_insn ("larl\t%2,%3", op);
12425 output_asm_insn ("brasl\t%0,%4", op);
12426 output_asm_insn ("l\t%0,%1", op);
12428 else if (!flag_pic)
12430 op[6] = gen_label_rtx ();
12432 output_asm_insn ("st\t%0,%1", op);
12433 output_asm_insn ("bras\t%2,%l6", op);
12434 output_asm_insn (".long\t%4", op);
12435 output_asm_insn (".long\t%3", op);
12436 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12437 output_asm_insn ("l\t%0,0(%2)", op);
12438 output_asm_insn ("l\t%2,4(%2)", op);
12439 output_asm_insn ("basr\t%0,%0", op);
12440 output_asm_insn ("l\t%0,%1", op);
12442 else
12444 op[5] = gen_label_rtx ();
12445 op[6] = gen_label_rtx ();
12447 output_asm_insn ("st\t%0,%1", op);
12448 output_asm_insn ("bras\t%2,%l6", op);
12449 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12450 output_asm_insn (".long\t%4-%l5", op);
12451 output_asm_insn (".long\t%3-%l5", op);
12452 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12453 output_asm_insn ("lr\t%0,%2", op);
12454 output_asm_insn ("a\t%0,0(%2)", op);
12455 output_asm_insn ("a\t%2,4(%2)", op);
12456 output_asm_insn ("basr\t%0,%0", op);
12457 output_asm_insn ("l\t%0,%1", op);
12461 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12462 into its SYMBOL_REF_FLAGS. */
12464 static void
12465 s390_encode_section_info (tree decl, rtx rtl, int first)
12467 default_encode_section_info (decl, rtl, first);
12469 if (TREE_CODE (decl) == VAR_DECL)
12471 /* Store the alignment to be able to check if we can use
12472 a larl/load-relative instruction. We only handle the cases
12473 that can go wrong (i.e. no FUNC_DECLs). */
12474 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12475 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12476 else if (DECL_ALIGN (decl) % 32)
12477 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12478 else if (DECL_ALIGN (decl) % 64)
12479 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12482 /* Literal pool references don't have a decl so they are handled
12483 differently here. We rely on the information in the MEM_ALIGN
12484 entry to decide upon the alignment. */
12485 if (MEM_P (rtl)
12486 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12487 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12489 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12490 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12491 else if (MEM_ALIGN (rtl) % 32)
12492 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12493 else if (MEM_ALIGN (rtl) % 64)
12494 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12498 /* Output thunk to FILE that implements a C++ virtual function call (with
12499 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12500 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12501 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12502 relative to the resulting this pointer. */
12504 static void
12505 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12506 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12507 tree function)
12509 rtx op[10];
12510 int nonlocal = 0;
12512 /* Make sure unwind info is emitted for the thunk if needed. */
12513 final_start_function (emit_barrier (), file, 1);
12515 /* Operand 0 is the target function. */
12516 op[0] = XEXP (DECL_RTL (function), 0);
12517 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12519 nonlocal = 1;
12520 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12521 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12522 op[0] = gen_rtx_CONST (Pmode, op[0]);
12525 /* Operand 1 is the 'this' pointer. */
12526 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12527 op[1] = gen_rtx_REG (Pmode, 3);
12528 else
12529 op[1] = gen_rtx_REG (Pmode, 2);
12531 /* Operand 2 is the delta. */
12532 op[2] = GEN_INT (delta);
12534 /* Operand 3 is the vcall_offset. */
12535 op[3] = GEN_INT (vcall_offset);
12537 /* Operand 4 is the temporary register. */
12538 op[4] = gen_rtx_REG (Pmode, 1);
12540 /* Operands 5 to 8 can be used as labels. */
12541 op[5] = NULL_RTX;
12542 op[6] = NULL_RTX;
12543 op[7] = NULL_RTX;
12544 op[8] = NULL_RTX;
12546 /* Operand 9 can be used for temporary register. */
12547 op[9] = NULL_RTX;
12549 /* Generate code. */
12550 if (TARGET_64BIT)
12552 /* Setup literal pool pointer if required. */
12553 if ((!DISP_IN_RANGE (delta)
12554 && !CONST_OK_FOR_K (delta)
12555 && !CONST_OK_FOR_Os (delta))
12556 || (!DISP_IN_RANGE (vcall_offset)
12557 && !CONST_OK_FOR_K (vcall_offset)
12558 && !CONST_OK_FOR_Os (vcall_offset)))
12560 op[5] = gen_label_rtx ();
12561 output_asm_insn ("larl\t%4,%5", op);
12564 /* Add DELTA to this pointer. */
12565 if (delta)
12567 if (CONST_OK_FOR_J (delta))
12568 output_asm_insn ("la\t%1,%2(%1)", op);
12569 else if (DISP_IN_RANGE (delta))
12570 output_asm_insn ("lay\t%1,%2(%1)", op);
12571 else if (CONST_OK_FOR_K (delta))
12572 output_asm_insn ("aghi\t%1,%2", op);
12573 else if (CONST_OK_FOR_Os (delta))
12574 output_asm_insn ("agfi\t%1,%2", op);
12575 else
12577 op[6] = gen_label_rtx ();
12578 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12582 /* Perform vcall adjustment. */
12583 if (vcall_offset)
12585 if (DISP_IN_RANGE (vcall_offset))
12587 output_asm_insn ("lg\t%4,0(%1)", op);
12588 output_asm_insn ("ag\t%1,%3(%4)", op);
12590 else if (CONST_OK_FOR_K (vcall_offset))
12592 output_asm_insn ("lghi\t%4,%3", op);
12593 output_asm_insn ("ag\t%4,0(%1)", op);
12594 output_asm_insn ("ag\t%1,0(%4)", op);
12596 else if (CONST_OK_FOR_Os (vcall_offset))
12598 output_asm_insn ("lgfi\t%4,%3", op);
12599 output_asm_insn ("ag\t%4,0(%1)", op);
12600 output_asm_insn ("ag\t%1,0(%4)", op);
12602 else
12604 op[7] = gen_label_rtx ();
12605 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12606 output_asm_insn ("ag\t%4,0(%1)", op);
12607 output_asm_insn ("ag\t%1,0(%4)", op);
12611 /* Jump to target. */
12612 output_asm_insn ("jg\t%0", op);
12614 /* Output literal pool if required. */
12615 if (op[5])
12617 output_asm_insn (".align\t4", op);
12618 targetm.asm_out.internal_label (file, "L",
12619 CODE_LABEL_NUMBER (op[5]));
12621 if (op[6])
12623 targetm.asm_out.internal_label (file, "L",
12624 CODE_LABEL_NUMBER (op[6]));
12625 output_asm_insn (".long\t%2", op);
12627 if (op[7])
12629 targetm.asm_out.internal_label (file, "L",
12630 CODE_LABEL_NUMBER (op[7]));
12631 output_asm_insn (".long\t%3", op);
12634 else
12636 /* Setup base pointer if required. */
12637 if (!vcall_offset
12638 || (!DISP_IN_RANGE (delta)
12639 && !CONST_OK_FOR_K (delta)
12640 && !CONST_OK_FOR_Os (delta))
12641 || (!DISP_IN_RANGE (delta)
12642 && !CONST_OK_FOR_K (vcall_offset)
12643 && !CONST_OK_FOR_Os (vcall_offset)))
12645 op[5] = gen_label_rtx ();
12646 output_asm_insn ("basr\t%4,0", op);
12647 targetm.asm_out.internal_label (file, "L",
12648 CODE_LABEL_NUMBER (op[5]));
12651 /* Add DELTA to this pointer. */
12652 if (delta)
12654 if (CONST_OK_FOR_J (delta))
12655 output_asm_insn ("la\t%1,%2(%1)", op);
12656 else if (DISP_IN_RANGE (delta))
12657 output_asm_insn ("lay\t%1,%2(%1)", op);
12658 else if (CONST_OK_FOR_K (delta))
12659 output_asm_insn ("ahi\t%1,%2", op);
12660 else if (CONST_OK_FOR_Os (delta))
12661 output_asm_insn ("afi\t%1,%2", op);
12662 else
12664 op[6] = gen_label_rtx ();
12665 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12669 /* Perform vcall adjustment. */
12670 if (vcall_offset)
12672 if (CONST_OK_FOR_J (vcall_offset))
12674 output_asm_insn ("l\t%4,0(%1)", op);
12675 output_asm_insn ("a\t%1,%3(%4)", op);
12677 else if (DISP_IN_RANGE (vcall_offset))
12679 output_asm_insn ("l\t%4,0(%1)", op);
12680 output_asm_insn ("ay\t%1,%3(%4)", op);
12682 else if (CONST_OK_FOR_K (vcall_offset))
12684 output_asm_insn ("lhi\t%4,%3", op);
12685 output_asm_insn ("a\t%4,0(%1)", op);
12686 output_asm_insn ("a\t%1,0(%4)", op);
12688 else if (CONST_OK_FOR_Os (vcall_offset))
12690 output_asm_insn ("iilf\t%4,%3", op);
12691 output_asm_insn ("a\t%4,0(%1)", op);
12692 output_asm_insn ("a\t%1,0(%4)", op);
12694 else
12696 op[7] = gen_label_rtx ();
12697 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12698 output_asm_insn ("a\t%4,0(%1)", op);
12699 output_asm_insn ("a\t%1,0(%4)", op);
12702 /* We had to clobber the base pointer register.
12703 Re-setup the base pointer (with a different base). */
12704 op[5] = gen_label_rtx ();
12705 output_asm_insn ("basr\t%4,0", op);
12706 targetm.asm_out.internal_label (file, "L",
12707 CODE_LABEL_NUMBER (op[5]));
12710 /* Jump to target. */
12711 op[8] = gen_label_rtx ();
12713 if (!flag_pic)
12714 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12715 else if (!nonlocal)
12716 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12717 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12718 else if (flag_pic == 1)
12720 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12721 output_asm_insn ("l\t%4,%0(%4)", op);
12723 else if (flag_pic == 2)
12725 op[9] = gen_rtx_REG (Pmode, 0);
12726 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12727 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12728 output_asm_insn ("ar\t%4,%9", op);
12729 output_asm_insn ("l\t%4,0(%4)", op);
12732 output_asm_insn ("br\t%4", op);
12734 /* Output literal pool. */
12735 output_asm_insn (".align\t4", op);
12737 if (nonlocal && flag_pic == 2)
12738 output_asm_insn (".long\t%0", op);
12739 if (nonlocal)
12741 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12742 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12745 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12746 if (!flag_pic)
12747 output_asm_insn (".long\t%0", op);
12748 else
12749 output_asm_insn (".long\t%0-%5", op);
12751 if (op[6])
12753 targetm.asm_out.internal_label (file, "L",
12754 CODE_LABEL_NUMBER (op[6]));
12755 output_asm_insn (".long\t%2", op);
12757 if (op[7])
12759 targetm.asm_out.internal_label (file, "L",
12760 CODE_LABEL_NUMBER (op[7]));
12761 output_asm_insn (".long\t%3", op);
12764 final_end_function ();
12767 static bool
12768 s390_valid_pointer_mode (machine_mode mode)
12770 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12773 /* Checks whether the given CALL_EXPR would use a caller
12774 saved register. This is used to decide whether sibling call
12775 optimization could be performed on the respective function
12776 call. */
12778 static bool
12779 s390_call_saved_register_used (tree call_expr)
12781 CUMULATIVE_ARGS cum_v;
12782 cumulative_args_t cum;
12783 tree parameter;
12784 machine_mode mode;
12785 tree type;
12786 rtx parm_rtx;
12787 int reg, i;
12789 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12790 cum = pack_cumulative_args (&cum_v);
12792 for (i = 0; i < call_expr_nargs (call_expr); i++)
12794 parameter = CALL_EXPR_ARG (call_expr, i);
12795 gcc_assert (parameter);
12797 /* For an undeclared variable passed as parameter we will get
12798 an ERROR_MARK node here. */
12799 if (TREE_CODE (parameter) == ERROR_MARK)
12800 return true;
12802 type = TREE_TYPE (parameter);
12803 gcc_assert (type);
12805 mode = TYPE_MODE (type);
12806 gcc_assert (mode);
12808 /* We assume that in the target function all parameters are
12809 named. This only has an impact on vector argument register
12810 usage none of which is call-saved. */
12811 if (pass_by_reference (&cum_v, mode, type, true))
12813 mode = Pmode;
12814 type = build_pointer_type (type);
12817 parm_rtx = s390_function_arg (cum, mode, type, true);
12819 s390_function_arg_advance (cum, mode, type, true);
12821 if (!parm_rtx)
12822 continue;
12824 if (REG_P (parm_rtx))
12826 for (reg = 0;
12827 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12828 reg++)
12829 if (!call_used_regs[reg + REGNO (parm_rtx)])
12830 return true;
12833 if (GET_CODE (parm_rtx) == PARALLEL)
12835 int i;
12837 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12839 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12841 gcc_assert (REG_P (r));
12843 for (reg = 0;
12844 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12845 reg++)
12846 if (!call_used_regs[reg + REGNO (r)])
12847 return true;
12852 return false;
12855 /* Return true if the given call expression can be
12856 turned into a sibling call.
12857 DECL holds the declaration of the function to be called whereas
12858 EXP is the call expression itself. */
12860 static bool
12861 s390_function_ok_for_sibcall (tree decl, tree exp)
12863 /* The TPF epilogue uses register 1. */
12864 if (TARGET_TPF_PROFILING)
12865 return false;
12867 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12868 which would have to be restored before the sibcall. */
12869 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12870 return false;
12872 /* Register 6 on s390 is available as an argument register but unfortunately
12873 "caller saved". This makes functions needing this register for arguments
12874 not suitable for sibcalls. */
12875 return !s390_call_saved_register_used (exp);
12878 /* Return the fixed registers used for condition codes. */
12880 static bool
12881 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12883 *p1 = CC_REGNUM;
12884 *p2 = INVALID_REGNUM;
12886 return true;
12889 /* This function is used by the call expanders of the machine description.
12890 It emits the call insn itself together with the necessary operations
12891 to adjust the target address and returns the emitted insn.
12892 ADDR_LOCATION is the target address rtx
12893 TLS_CALL the location of the thread-local symbol
12894 RESULT_REG the register where the result of the call should be stored
12895 RETADDR_REG the register where the return address should be stored
12896 If this parameter is NULL_RTX the call is considered
12897 to be a sibling call. */
12899 rtx_insn *
12900 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12901 rtx retaddr_reg)
12903 bool plt_call = false;
12904 rtx_insn *insn;
12905 rtx call;
12906 rtx clobber;
12907 rtvec vec;
12909 /* Direct function calls need special treatment. */
12910 if (GET_CODE (addr_location) == SYMBOL_REF)
12912 /* When calling a global routine in PIC mode, we must
12913 replace the symbol itself with the PLT stub. */
12914 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12916 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
12918 addr_location = gen_rtx_UNSPEC (Pmode,
12919 gen_rtvec (1, addr_location),
12920 UNSPEC_PLT);
12921 addr_location = gen_rtx_CONST (Pmode, addr_location);
12922 plt_call = true;
12924 else
12925 /* For -fpic code the PLT entries might use r12 which is
12926 call-saved. Therefore we cannot do a sibcall when
12927 calling directly using a symbol ref. When reaching
12928 this point we decided (in s390_function_ok_for_sibcall)
12929 to do a sibcall for a function pointer but one of the
12930 optimizers was able to get rid of the function pointer
12931 by propagating the symbol ref into the call. This
12932 optimization is illegal for S/390 so we turn the direct
12933 call into a indirect call again. */
12934 addr_location = force_reg (Pmode, addr_location);
12937 /* Unless we can use the bras(l) insn, force the
12938 routine address into a register. */
12939 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12941 if (flag_pic)
12942 addr_location = legitimize_pic_address (addr_location, 0);
12943 else
12944 addr_location = force_reg (Pmode, addr_location);
12948 /* If it is already an indirect call or the code above moved the
12949 SYMBOL_REF to somewhere else make sure the address can be found in
12950 register 1. */
12951 if (retaddr_reg == NULL_RTX
12952 && GET_CODE (addr_location) != SYMBOL_REF
12953 && !plt_call)
12955 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12956 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12959 addr_location = gen_rtx_MEM (QImode, addr_location);
12960 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12962 if (result_reg != NULL_RTX)
12963 call = gen_rtx_SET (result_reg, call);
12965 if (retaddr_reg != NULL_RTX)
12967 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12969 if (tls_call != NULL_RTX)
12970 vec = gen_rtvec (3, call, clobber,
12971 gen_rtx_USE (VOIDmode, tls_call));
12972 else
12973 vec = gen_rtvec (2, call, clobber);
12975 call = gen_rtx_PARALLEL (VOIDmode, vec);
12978 insn = emit_call_insn (call);
12980 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12981 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12983 /* s390_function_ok_for_sibcall should
12984 have denied sibcalls in this case. */
12985 gcc_assert (retaddr_reg != NULL_RTX);
12986 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12988 return insn;
12991 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12993 static void
12994 s390_conditional_register_usage (void)
12996 int i;
12998 if (flag_pic)
13000 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13001 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13003 if (TARGET_CPU_ZARCH)
13005 fixed_regs[BASE_REGNUM] = 0;
13006 call_used_regs[BASE_REGNUM] = 0;
13007 fixed_regs[RETURN_REGNUM] = 0;
13008 call_used_regs[RETURN_REGNUM] = 0;
13010 if (TARGET_64BIT)
13012 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13013 call_used_regs[i] = call_really_used_regs[i] = 0;
13015 else
13017 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13018 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13021 if (TARGET_SOFT_FLOAT)
13023 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13024 call_used_regs[i] = fixed_regs[i] = 1;
13027 /* Disable v16 - v31 for non-vector target. */
13028 if (!TARGET_VX)
13030 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13031 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13035 /* Corresponding function to eh_return expander. */
13037 static GTY(()) rtx s390_tpf_eh_return_symbol;
13038 void
13039 s390_emit_tpf_eh_return (rtx target)
13041 rtx_insn *insn;
13042 rtx reg, orig_ra;
13044 if (!s390_tpf_eh_return_symbol)
13045 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13047 reg = gen_rtx_REG (Pmode, 2);
13048 orig_ra = gen_rtx_REG (Pmode, 3);
13050 emit_move_insn (reg, target);
13051 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13052 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13053 gen_rtx_REG (Pmode, RETURN_REGNUM));
13054 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13055 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13057 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13060 /* Rework the prologue/epilogue to avoid saving/restoring
13061 registers unnecessarily. */
13063 static void
13064 s390_optimize_prologue (void)
13066 rtx_insn *insn, *new_insn, *next_insn;
13068 /* Do a final recompute of the frame-related data. */
13069 s390_optimize_register_info ();
13071 /* If all special registers are in fact used, there's nothing we
13072 can do, so no point in walking the insn list. */
13074 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13075 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13076 && (TARGET_CPU_ZARCH
13077 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13078 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13079 return;
13081 /* Search for prologue/epilogue insns and replace them. */
13083 for (insn = get_insns (); insn; insn = next_insn)
13085 int first, last, off;
13086 rtx set, base, offset;
13087 rtx pat;
13089 next_insn = NEXT_INSN (insn);
13091 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13092 continue;
13094 pat = PATTERN (insn);
13096 /* Remove ldgr/lgdr instructions used for saving and restore
13097 GPRs if possible. */
13098 if (TARGET_Z10)
13100 rtx tmp_pat = pat;
13102 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13103 tmp_pat = XVECEXP (pat, 0, 0);
13105 if (GET_CODE (tmp_pat) == SET
13106 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13107 && REG_P (SET_SRC (tmp_pat))
13108 && REG_P (SET_DEST (tmp_pat)))
13110 int src_regno = REGNO (SET_SRC (tmp_pat));
13111 int dest_regno = REGNO (SET_DEST (tmp_pat));
13112 int gpr_regno;
13113 int fpr_regno;
13115 if (!((GENERAL_REGNO_P (src_regno)
13116 && FP_REGNO_P (dest_regno))
13117 || (FP_REGNO_P (src_regno)
13118 && GENERAL_REGNO_P (dest_regno))))
13119 continue;
13121 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13122 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13124 /* GPR must be call-saved, FPR must be call-clobbered. */
13125 if (!call_really_used_regs[fpr_regno]
13126 || call_really_used_regs[gpr_regno])
13127 continue;
13129 /* It must not happen that what we once saved in an FPR now
13130 needs a stack slot. */
13131 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13133 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13135 remove_insn (insn);
13136 continue;
13141 if (GET_CODE (pat) == PARALLEL
13142 && store_multiple_operation (pat, VOIDmode))
13144 set = XVECEXP (pat, 0, 0);
13145 first = REGNO (SET_SRC (set));
13146 last = first + XVECLEN (pat, 0) - 1;
13147 offset = const0_rtx;
13148 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13149 off = INTVAL (offset);
13151 if (GET_CODE (base) != REG || off < 0)
13152 continue;
13153 if (cfun_frame_layout.first_save_gpr != -1
13154 && (cfun_frame_layout.first_save_gpr < first
13155 || cfun_frame_layout.last_save_gpr > last))
13156 continue;
13157 if (REGNO (base) != STACK_POINTER_REGNUM
13158 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13159 continue;
13160 if (first > BASE_REGNUM || last < BASE_REGNUM)
13161 continue;
13163 if (cfun_frame_layout.first_save_gpr != -1)
13165 rtx s_pat = save_gprs (base,
13166 off + (cfun_frame_layout.first_save_gpr
13167 - first) * UNITS_PER_LONG,
13168 cfun_frame_layout.first_save_gpr,
13169 cfun_frame_layout.last_save_gpr);
13170 new_insn = emit_insn_before (s_pat, insn);
13171 INSN_ADDRESSES_NEW (new_insn, -1);
13174 remove_insn (insn);
13175 continue;
13178 if (cfun_frame_layout.first_save_gpr == -1
13179 && GET_CODE (pat) == SET
13180 && GENERAL_REG_P (SET_SRC (pat))
13181 && GET_CODE (SET_DEST (pat)) == MEM)
13183 set = pat;
13184 first = REGNO (SET_SRC (set));
13185 offset = const0_rtx;
13186 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13187 off = INTVAL (offset);
13189 if (GET_CODE (base) != REG || off < 0)
13190 continue;
13191 if (REGNO (base) != STACK_POINTER_REGNUM
13192 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13193 continue;
13195 remove_insn (insn);
13196 continue;
13199 if (GET_CODE (pat) == PARALLEL
13200 && load_multiple_operation (pat, VOIDmode))
13202 set = XVECEXP (pat, 0, 0);
13203 first = REGNO (SET_DEST (set));
13204 last = first + XVECLEN (pat, 0) - 1;
13205 offset = const0_rtx;
13206 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13207 off = INTVAL (offset);
13209 if (GET_CODE (base) != REG || off < 0)
13210 continue;
13212 if (cfun_frame_layout.first_restore_gpr != -1
13213 && (cfun_frame_layout.first_restore_gpr < first
13214 || cfun_frame_layout.last_restore_gpr > last))
13215 continue;
13216 if (REGNO (base) != STACK_POINTER_REGNUM
13217 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13218 continue;
13219 if (first > BASE_REGNUM || last < BASE_REGNUM)
13220 continue;
13222 if (cfun_frame_layout.first_restore_gpr != -1)
13224 rtx rpat = restore_gprs (base,
13225 off + (cfun_frame_layout.first_restore_gpr
13226 - first) * UNITS_PER_LONG,
13227 cfun_frame_layout.first_restore_gpr,
13228 cfun_frame_layout.last_restore_gpr);
13230 /* Remove REG_CFA_RESTOREs for registers that we no
13231 longer need to save. */
13232 REG_NOTES (rpat) = REG_NOTES (insn);
13233 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13234 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13235 && ((int) REGNO (XEXP (*ptr, 0))
13236 < cfun_frame_layout.first_restore_gpr))
13237 *ptr = XEXP (*ptr, 1);
13238 else
13239 ptr = &XEXP (*ptr, 1);
13240 new_insn = emit_insn_before (rpat, insn);
13241 RTX_FRAME_RELATED_P (new_insn) = 1;
13242 INSN_ADDRESSES_NEW (new_insn, -1);
13245 remove_insn (insn);
13246 continue;
13249 if (cfun_frame_layout.first_restore_gpr == -1
13250 && GET_CODE (pat) == SET
13251 && GENERAL_REG_P (SET_DEST (pat))
13252 && GET_CODE (SET_SRC (pat)) == MEM)
13254 set = pat;
13255 first = REGNO (SET_DEST (set));
13256 offset = const0_rtx;
13257 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13258 off = INTVAL (offset);
13260 if (GET_CODE (base) != REG || off < 0)
13261 continue;
13263 if (REGNO (base) != STACK_POINTER_REGNUM
13264 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13265 continue;
13267 remove_insn (insn);
13268 continue;
13273 /* On z10 and later the dynamic branch prediction must see the
13274 backward jump within a certain windows. If not it falls back to
13275 the static prediction. This function rearranges the loop backward
13276 branch in a way which makes the static prediction always correct.
13277 The function returns true if it added an instruction. */
13278 static bool
13279 s390_fix_long_loop_prediction (rtx_insn *insn)
13281 rtx set = single_set (insn);
13282 rtx code_label, label_ref, new_label;
13283 rtx_insn *uncond_jump;
13284 rtx_insn *cur_insn;
13285 rtx tmp;
13286 int distance;
13288 /* This will exclude branch on count and branch on index patterns
13289 since these are correctly statically predicted. */
13290 if (!set
13291 || SET_DEST (set) != pc_rtx
13292 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13293 return false;
13295 /* Skip conditional returns. */
13296 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13297 && XEXP (SET_SRC (set), 2) == pc_rtx)
13298 return false;
13300 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13301 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13303 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13305 code_label = XEXP (label_ref, 0);
13307 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13308 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13309 || (INSN_ADDRESSES (INSN_UID (insn))
13310 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13311 return false;
13313 for (distance = 0, cur_insn = PREV_INSN (insn);
13314 distance < PREDICT_DISTANCE - 6;
13315 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13316 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13317 return false;
13319 new_label = gen_label_rtx ();
13320 uncond_jump = emit_jump_insn_after (
13321 gen_rtx_SET (pc_rtx,
13322 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13323 insn);
13324 emit_label_after (new_label, uncond_jump);
13326 tmp = XEXP (SET_SRC (set), 1);
13327 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13328 XEXP (SET_SRC (set), 2) = tmp;
13329 INSN_CODE (insn) = -1;
13331 XEXP (label_ref, 0) = new_label;
13332 JUMP_LABEL (insn) = new_label;
13333 JUMP_LABEL (uncond_jump) = code_label;
13335 return true;
13338 /* Returns 1 if INSN reads the value of REG for purposes not related
13339 to addressing of memory, and 0 otherwise. */
13340 static int
13341 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13343 return reg_referenced_p (reg, PATTERN (insn))
13344 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13347 /* Starting from INSN find_cond_jump looks downwards in the insn
13348 stream for a single jump insn which is the last user of the
13349 condition code set in INSN. */
13350 static rtx_insn *
13351 find_cond_jump (rtx_insn *insn)
13353 for (; insn; insn = NEXT_INSN (insn))
13355 rtx ite, cc;
13357 if (LABEL_P (insn))
13358 break;
13360 if (!JUMP_P (insn))
13362 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13363 break;
13364 continue;
13367 /* This will be triggered by a return. */
13368 if (GET_CODE (PATTERN (insn)) != SET)
13369 break;
13371 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13372 ite = SET_SRC (PATTERN (insn));
13374 if (GET_CODE (ite) != IF_THEN_ELSE)
13375 break;
13377 cc = XEXP (XEXP (ite, 0), 0);
13378 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13379 break;
13381 if (find_reg_note (insn, REG_DEAD, cc))
13382 return insn;
13383 break;
13386 return NULL;
13389 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13390 the semantics does not change. If NULL_RTX is passed as COND the
13391 function tries to find the conditional jump starting with INSN. */
13392 static void
13393 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13395 rtx tmp = *op0;
13397 if (cond == NULL_RTX)
13399 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13400 rtx set = jump ? single_set (jump) : NULL_RTX;
13402 if (set == NULL_RTX)
13403 return;
13405 cond = XEXP (SET_SRC (set), 0);
13408 *op0 = *op1;
13409 *op1 = tmp;
13410 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13413 /* On z10, instructions of the compare-and-branch family have the
13414 property to access the register occurring as second operand with
13415 its bits complemented. If such a compare is grouped with a second
13416 instruction that accesses the same register non-complemented, and
13417 if that register's value is delivered via a bypass, then the
13418 pipeline recycles, thereby causing significant performance decline.
13419 This function locates such situations and exchanges the two
13420 operands of the compare. The function return true whenever it
13421 added an insn. */
13422 static bool
13423 s390_z10_optimize_cmp (rtx_insn *insn)
13425 rtx_insn *prev_insn, *next_insn;
13426 bool insn_added_p = false;
13427 rtx cond, *op0, *op1;
13429 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13431 /* Handle compare and branch and branch on count
13432 instructions. */
13433 rtx pattern = single_set (insn);
13435 if (!pattern
13436 || SET_DEST (pattern) != pc_rtx
13437 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13438 return false;
13440 cond = XEXP (SET_SRC (pattern), 0);
13441 op0 = &XEXP (cond, 0);
13442 op1 = &XEXP (cond, 1);
13444 else if (GET_CODE (PATTERN (insn)) == SET)
13446 rtx src, dest;
13448 /* Handle normal compare instructions. */
13449 src = SET_SRC (PATTERN (insn));
13450 dest = SET_DEST (PATTERN (insn));
13452 if (!REG_P (dest)
13453 || !CC_REGNO_P (REGNO (dest))
13454 || GET_CODE (src) != COMPARE)
13455 return false;
13457 /* s390_swap_cmp will try to find the conditional
13458 jump when passing NULL_RTX as condition. */
13459 cond = NULL_RTX;
13460 op0 = &XEXP (src, 0);
13461 op1 = &XEXP (src, 1);
13463 else
13464 return false;
13466 if (!REG_P (*op0) || !REG_P (*op1))
13467 return false;
13469 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13470 return false;
13472 /* Swap the COMPARE arguments and its mask if there is a
13473 conflicting access in the previous insn. */
13474 prev_insn = prev_active_insn (insn);
13475 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13476 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13477 s390_swap_cmp (cond, op0, op1, insn);
13479 /* Check if there is a conflict with the next insn. If there
13480 was no conflict with the previous insn, then swap the
13481 COMPARE arguments and its mask. If we already swapped
13482 the operands, or if swapping them would cause a conflict
13483 with the previous insn, issue a NOP after the COMPARE in
13484 order to separate the two instuctions. */
13485 next_insn = next_active_insn (insn);
13486 if (next_insn != NULL_RTX && INSN_P (next_insn)
13487 && s390_non_addr_reg_read_p (*op1, next_insn))
13489 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13490 && s390_non_addr_reg_read_p (*op0, prev_insn))
13492 if (REGNO (*op1) == 0)
13493 emit_insn_after (gen_nop1 (), insn);
13494 else
13495 emit_insn_after (gen_nop (), insn);
13496 insn_added_p = true;
13498 else
13499 s390_swap_cmp (cond, op0, op1, insn);
13501 return insn_added_p;
13504 /* Perform machine-dependent processing. */
13506 static void
13507 s390_reorg (void)
13509 bool pool_overflow = false;
13510 int hw_before, hw_after;
13512 /* Make sure all splits have been performed; splits after
13513 machine_dependent_reorg might confuse insn length counts. */
13514 split_all_insns_noflow ();
13516 /* Install the main literal pool and the associated base
13517 register load insns.
13519 In addition, there are two problematic situations we need
13520 to correct:
13522 - the literal pool might be > 4096 bytes in size, so that
13523 some of its elements cannot be directly accessed
13525 - a branch target might be > 64K away from the branch, so that
13526 it is not possible to use a PC-relative instruction.
13528 To fix those, we split the single literal pool into multiple
13529 pool chunks, reloading the pool base register at various
13530 points throughout the function to ensure it always points to
13531 the pool chunk the following code expects, and / or replace
13532 PC-relative branches by absolute branches.
13534 However, the two problems are interdependent: splitting the
13535 literal pool can move a branch further away from its target,
13536 causing the 64K limit to overflow, and on the other hand,
13537 replacing a PC-relative branch by an absolute branch means
13538 we need to put the branch target address into the literal
13539 pool, possibly causing it to overflow.
13541 So, we loop trying to fix up both problems until we manage
13542 to satisfy both conditions at the same time. Note that the
13543 loop is guaranteed to terminate as every pass of the loop
13544 strictly decreases the total number of PC-relative branches
13545 in the function. (This is not completely true as there
13546 might be branch-over-pool insns introduced by chunkify_start.
13547 Those never need to be split however.) */
13549 for (;;)
13551 struct constant_pool *pool = NULL;
13553 /* Collect the literal pool. */
13554 if (!pool_overflow)
13556 pool = s390_mainpool_start ();
13557 if (!pool)
13558 pool_overflow = true;
13561 /* If literal pool overflowed, start to chunkify it. */
13562 if (pool_overflow)
13563 pool = s390_chunkify_start ();
13565 /* Split out-of-range branches. If this has created new
13566 literal pool entries, cancel current chunk list and
13567 recompute it. zSeries machines have large branch
13568 instructions, so we never need to split a branch. */
13569 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13571 if (pool_overflow)
13572 s390_chunkify_cancel (pool);
13573 else
13574 s390_mainpool_cancel (pool);
13576 continue;
13579 /* If we made it up to here, both conditions are satisfied.
13580 Finish up literal pool related changes. */
13581 if (pool_overflow)
13582 s390_chunkify_finish (pool);
13583 else
13584 s390_mainpool_finish (pool);
13586 /* We're done splitting branches. */
13587 cfun->machine->split_branches_pending_p = false;
13588 break;
13591 /* Generate out-of-pool execute target insns. */
13592 if (TARGET_CPU_ZARCH)
13594 rtx_insn *insn, *target;
13595 rtx label;
13597 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13599 label = s390_execute_label (insn);
13600 if (!label)
13601 continue;
13603 gcc_assert (label != const0_rtx);
13605 target = emit_label (XEXP (label, 0));
13606 INSN_ADDRESSES_NEW (target, -1);
13608 target = emit_insn (s390_execute_target (insn));
13609 INSN_ADDRESSES_NEW (target, -1);
13613 /* Try to optimize prologue and epilogue further. */
13614 s390_optimize_prologue ();
13616 /* Walk over the insns and do some >=z10 specific changes. */
13617 if (s390_tune >= PROCESSOR_2097_Z10)
13619 rtx_insn *insn;
13620 bool insn_added_p = false;
13622 /* The insn lengths and addresses have to be up to date for the
13623 following manipulations. */
13624 shorten_branches (get_insns ());
13626 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13628 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13629 continue;
13631 if (JUMP_P (insn))
13632 insn_added_p |= s390_fix_long_loop_prediction (insn);
13634 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13635 || GET_CODE (PATTERN (insn)) == SET)
13636 && s390_tune == PROCESSOR_2097_Z10)
13637 insn_added_p |= s390_z10_optimize_cmp (insn);
13640 /* Adjust branches if we added new instructions. */
13641 if (insn_added_p)
13642 shorten_branches (get_insns ());
13645 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13646 if (hw_after > 0)
13648 rtx_insn *insn;
13650 /* Insert NOPs for hotpatching. */
13651 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13652 /* Emit NOPs
13653 1. inside the area covered by debug information to allow setting
13654 breakpoints at the NOPs,
13655 2. before any insn which results in an asm instruction,
13656 3. before in-function labels to avoid jumping to the NOPs, for
13657 example as part of a loop,
13658 4. before any barrier in case the function is completely empty
13659 (__builtin_unreachable ()) and has neither internal labels nor
13660 active insns.
13662 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13663 break;
13664 /* Output a series of NOPs before the first active insn. */
13665 while (insn && hw_after > 0)
13667 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13669 emit_insn_before (gen_nop_6_byte (), insn);
13670 hw_after -= 3;
13672 else if (hw_after >= 2)
13674 emit_insn_before (gen_nop_4_byte (), insn);
13675 hw_after -= 2;
13677 else
13679 emit_insn_before (gen_nop_2_byte (), insn);
13680 hw_after -= 1;
13686 /* Return true if INSN is a fp load insn writing register REGNO. */
13687 static inline bool
13688 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13690 rtx set;
13691 enum attr_type flag = s390_safe_attr_type (insn);
13693 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13694 return false;
13696 set = single_set (insn);
13698 if (set == NULL_RTX)
13699 return false;
13701 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13702 return false;
13704 if (REGNO (SET_DEST (set)) != regno)
13705 return false;
13707 return true;
13710 /* This value describes the distance to be avoided between an
13711 aritmetic fp instruction and an fp load writing the same register.
13712 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
13713 fine but the exact value has to be avoided. Otherwise the FP
13714 pipeline will throw an exception causing a major penalty. */
13715 #define Z10_EARLYLOAD_DISTANCE 7
13717 /* Rearrange the ready list in order to avoid the situation described
13718 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13719 moved to the very end of the ready list. */
13720 static void
13721 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13723 unsigned int regno;
13724 int nready = *nready_p;
13725 rtx_insn *tmp;
13726 int i;
13727 rtx_insn *insn;
13728 rtx set;
13729 enum attr_type flag;
13730 int distance;
13732 /* Skip DISTANCE - 1 active insns. */
13733 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13734 distance > 0 && insn != NULL_RTX;
13735 distance--, insn = prev_active_insn (insn))
13736 if (CALL_P (insn) || JUMP_P (insn))
13737 return;
13739 if (insn == NULL_RTX)
13740 return;
13742 set = single_set (insn);
13744 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13745 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13746 return;
13748 flag = s390_safe_attr_type (insn);
13750 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13751 return;
13753 regno = REGNO (SET_DEST (set));
13754 i = nready - 1;
13756 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13757 i--;
13759 if (!i)
13760 return;
13762 tmp = ready[i];
13763 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13764 ready[0] = tmp;
13768 /* The s390_sched_state variable tracks the state of the current or
13769 the last instruction group.
13771 0,1,2 number of instructions scheduled in the current group
13772 3 the last group is complete - normal insns
13773 4 the last group was a cracked/expanded insn */
13775 static int s390_sched_state;
13777 #define S390_SCHED_STATE_NORMAL 3
13778 #define S390_SCHED_STATE_CRACKED 4
13780 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
13781 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
13782 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
13783 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
13785 static unsigned int
13786 s390_get_sched_attrmask (rtx_insn *insn)
13788 unsigned int mask = 0;
13790 switch (s390_tune)
13792 case PROCESSOR_2827_ZEC12:
13793 if (get_attr_zEC12_cracked (insn))
13794 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13795 if (get_attr_zEC12_expanded (insn))
13796 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13797 if (get_attr_zEC12_endgroup (insn))
13798 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13799 if (get_attr_zEC12_groupalone (insn))
13800 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13801 break;
13802 case PROCESSOR_2964_Z13:
13803 if (get_attr_z13_cracked (insn))
13804 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13805 if (get_attr_z13_expanded (insn))
13806 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13807 if (get_attr_z13_endgroup (insn))
13808 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13809 if (get_attr_z13_groupalone (insn))
13810 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13811 break;
13812 default:
13813 gcc_unreachable ();
13815 return mask;
13818 static unsigned int
13819 s390_get_unit_mask (rtx_insn *insn, int *units)
13821 unsigned int mask = 0;
13823 switch (s390_tune)
13825 case PROCESSOR_2964_Z13:
13826 *units = 3;
13827 if (get_attr_z13_unit_lsu (insn))
13828 mask |= 1 << 0;
13829 if (get_attr_z13_unit_fxu (insn))
13830 mask |= 1 << 1;
13831 if (get_attr_z13_unit_vfu (insn))
13832 mask |= 1 << 2;
13833 break;
13834 default:
13835 gcc_unreachable ();
13837 return mask;
13840 /* Return the scheduling score for INSN. The higher the score the
13841 better. The score is calculated from the OOO scheduling attributes
13842 of INSN and the scheduling state s390_sched_state. */
13843 static int
13844 s390_sched_score (rtx_insn *insn)
13846 unsigned int mask = s390_get_sched_attrmask (insn);
13847 int score = 0;
13849 switch (s390_sched_state)
13851 case 0:
13852 /* Try to put insns into the first slot which would otherwise
13853 break a group. */
13854 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13855 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13856 score += 5;
13857 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13858 score += 10;
13859 /* fallthrough */
13860 case 1:
13861 /* Prefer not cracked insns while trying to put together a
13862 group. */
13863 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13864 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
13865 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
13866 score += 10;
13867 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
13868 score += 5;
13869 break;
13870 case 2:
13871 /* Prefer not cracked insns while trying to put together a
13872 group. */
13873 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13874 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
13875 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
13876 score += 10;
13877 /* Prefer endgroup insns in the last slot. */
13878 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
13879 score += 10;
13880 break;
13881 case S390_SCHED_STATE_NORMAL:
13882 /* Prefer not cracked insns if the last was not cracked. */
13883 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13884 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
13885 score += 5;
13886 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13887 score += 10;
13888 break;
13889 case S390_SCHED_STATE_CRACKED:
13890 /* Try to keep cracked insns together to prevent them from
13891 interrupting groups. */
13892 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13893 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13894 score += 5;
13895 break;
13898 if (s390_tune == PROCESSOR_2964_Z13)
13900 int units, i;
13901 unsigned unit_mask, m = 1;
13903 unit_mask = s390_get_unit_mask (insn, &units);
13904 gcc_assert (units <= MAX_SCHED_UNITS);
13906 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
13907 ago the last insn of this unit type got scheduled. This is
13908 supposed to help providing a proper instruction mix to the
13909 CPU. */
13910 for (i = 0; i < units; i++, m <<= 1)
13911 if (m & unit_mask)
13912 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
13913 MAX_SCHED_MIX_DISTANCE);
13915 return score;
13918 /* This function is called via hook TARGET_SCHED_REORDER before
13919 issuing one insn from list READY which contains *NREADYP entries.
13920 For target z10 it reorders load instructions to avoid early load
13921 conflicts in the floating point pipeline */
13922 static int
13923 s390_sched_reorder (FILE *file, int verbose,
13924 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13926 if (s390_tune == PROCESSOR_2097_Z10
13927 && reload_completed
13928 && *nreadyp > 1)
13929 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13931 if (s390_tune >= PROCESSOR_2827_ZEC12
13932 && reload_completed
13933 && *nreadyp > 1)
13935 int i;
13936 int last_index = *nreadyp - 1;
13937 int max_index = -1;
13938 int max_score = -1;
13939 rtx_insn *tmp;
13941 /* Just move the insn with the highest score to the top (the
13942 end) of the list. A full sort is not needed since a conflict
13943 in the hazard recognition cannot happen. So the top insn in
13944 the ready list will always be taken. */
13945 for (i = last_index; i >= 0; i--)
13947 int score;
13949 if (recog_memoized (ready[i]) < 0)
13950 continue;
13952 score = s390_sched_score (ready[i]);
13953 if (score > max_score)
13955 max_score = score;
13956 max_index = i;
13960 if (max_index != -1)
13962 if (max_index != last_index)
13964 tmp = ready[max_index];
13965 ready[max_index] = ready[last_index];
13966 ready[last_index] = tmp;
13968 if (verbose > 5)
13969 fprintf (file,
13970 ";;\t\tBACKEND: move insn %d to the top of list\n",
13971 INSN_UID (ready[last_index]));
13973 else if (verbose > 5)
13974 fprintf (file,
13975 ";;\t\tBACKEND: best insn %d already on top\n",
13976 INSN_UID (ready[last_index]));
13979 if (verbose > 5)
13981 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13982 s390_sched_state);
13984 for (i = last_index; i >= 0; i--)
13986 unsigned int sched_mask;
13987 rtx_insn *insn = ready[i];
13989 if (recog_memoized (insn) < 0)
13990 continue;
13992 sched_mask = s390_get_sched_attrmask (insn);
13993 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
13994 INSN_UID (insn),
13995 s390_sched_score (insn));
13996 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
13997 ((M) & sched_mask) ? #ATTR : "");
13998 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
13999 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14000 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14001 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14002 #undef PRINT_SCHED_ATTR
14003 if (s390_tune == PROCESSOR_2964_Z13)
14005 unsigned int unit_mask, m = 1;
14006 int units, j;
14008 unit_mask = s390_get_unit_mask (insn, &units);
14009 fprintf (file, "(units:");
14010 for (j = 0; j < units; j++, m <<= 1)
14011 if (m & unit_mask)
14012 fprintf (file, " u%d", j);
14013 fprintf (file, ")");
14015 fprintf (file, "\n");
14020 return s390_issue_rate ();
14024 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14025 the scheduler has issued INSN. It stores the last issued insn into
14026 last_scheduled_insn in order to make it available for
14027 s390_sched_reorder. */
14028 static int
14029 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14031 last_scheduled_insn = insn;
14033 if (s390_tune >= PROCESSOR_2827_ZEC12
14034 && reload_completed
14035 && recog_memoized (insn) >= 0)
14037 unsigned int mask = s390_get_sched_attrmask (insn);
14039 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14040 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14041 s390_sched_state = S390_SCHED_STATE_CRACKED;
14042 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14043 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14044 s390_sched_state = S390_SCHED_STATE_NORMAL;
14045 else
14047 /* Only normal insns are left (mask == 0). */
14048 switch (s390_sched_state)
14050 case 0:
14051 case 1:
14052 case 2:
14053 case S390_SCHED_STATE_NORMAL:
14054 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14055 s390_sched_state = 1;
14056 else
14057 s390_sched_state++;
14059 break;
14060 case S390_SCHED_STATE_CRACKED:
14061 s390_sched_state = S390_SCHED_STATE_NORMAL;
14062 break;
14066 if (s390_tune == PROCESSOR_2964_Z13)
14068 int units, i;
14069 unsigned unit_mask, m = 1;
14071 unit_mask = s390_get_unit_mask (insn, &units);
14072 gcc_assert (units <= MAX_SCHED_UNITS);
14074 for (i = 0; i < units; i++, m <<= 1)
14075 if (m & unit_mask)
14076 last_scheduled_unit_distance[i] = 0;
14077 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14078 last_scheduled_unit_distance[i]++;
14081 if (verbose > 5)
14083 unsigned int sched_mask;
14085 sched_mask = s390_get_sched_attrmask (insn);
14087 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14088 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14089 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14090 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14091 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14092 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14093 #undef PRINT_SCHED_ATTR
14095 if (s390_tune == PROCESSOR_2964_Z13)
14097 unsigned int unit_mask, m = 1;
14098 int units, j;
14100 unit_mask = s390_get_unit_mask (insn, &units);
14101 fprintf (file, "(units:");
14102 for (j = 0; j < units; j++, m <<= 1)
14103 if (m & unit_mask)
14104 fprintf (file, " %d", j);
14105 fprintf (file, ")");
14107 fprintf (file, " sched state: %d\n", s390_sched_state);
14109 if (s390_tune == PROCESSOR_2964_Z13)
14111 int units, j;
14113 s390_get_unit_mask (insn, &units);
14115 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14116 for (j = 0; j < units; j++)
14117 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14118 fprintf (file, "\n");
14123 if (GET_CODE (PATTERN (insn)) != USE
14124 && GET_CODE (PATTERN (insn)) != CLOBBER)
14125 return more - 1;
14126 else
14127 return more;
14130 static void
14131 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14132 int verbose ATTRIBUTE_UNUSED,
14133 int max_ready ATTRIBUTE_UNUSED)
14135 last_scheduled_insn = NULL;
14136 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14137 s390_sched_state = 0;
14140 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14141 a new number struct loop *loop should be unrolled if tuned for cpus with
14142 a built-in stride prefetcher.
14143 The loop is analyzed for memory accesses by calling check_dpu for
14144 each rtx of the loop. Depending on the loop_depth and the amount of
14145 memory accesses a new number <=nunroll is returned to improve the
14146 behavior of the hardware prefetch unit. */
14147 static unsigned
14148 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14150 basic_block *bbs;
14151 rtx_insn *insn;
14152 unsigned i;
14153 unsigned mem_count = 0;
14155 if (s390_tune < PROCESSOR_2097_Z10)
14156 return nunroll;
14158 /* Count the number of memory references within the loop body. */
14159 bbs = get_loop_body (loop);
14160 subrtx_iterator::array_type array;
14161 for (i = 0; i < loop->num_nodes; i++)
14162 FOR_BB_INSNS (bbs[i], insn)
14163 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14164 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14165 if (MEM_P (*iter))
14166 mem_count += 1;
14167 free (bbs);
14169 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
14170 if (mem_count == 0)
14171 return nunroll;
14173 switch (loop_depth(loop))
14175 case 1:
14176 return MIN (nunroll, 28 / mem_count);
14177 case 2:
14178 return MIN (nunroll, 22 / mem_count);
14179 default:
14180 return MIN (nunroll, 16 / mem_count);
14184 /* Restore the current options. This is a hook function and also called
14185 internally. */
14187 static void
14188 s390_function_specific_restore (struct gcc_options *opts,
14189 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14191 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14194 static void
14195 s390_option_override_internal (bool main_args_p,
14196 struct gcc_options *opts,
14197 const struct gcc_options *opts_set)
14199 const char *prefix;
14200 const char *suffix;
14202 /* Set up prefix/suffix so the error messages refer to either the command
14203 line argument, or the attribute(target). */
14204 if (main_args_p)
14206 prefix = "-m";
14207 suffix = "";
14209 else
14211 prefix = "option(\"";
14212 suffix = "\")";
14216 /* Architecture mode defaults according to ABI. */
14217 if (!(opts_set->x_target_flags & MASK_ZARCH))
14219 if (TARGET_64BIT)
14220 opts->x_target_flags |= MASK_ZARCH;
14221 else
14222 opts->x_target_flags &= ~MASK_ZARCH;
14225 /* Set the march default in case it hasn't been specified on cmdline. */
14226 if (!opts_set->x_s390_arch)
14227 opts->x_s390_arch = PROCESSOR_2064_Z900;
14228 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14229 || opts->x_s390_arch == PROCESSOR_9672_G6)
14230 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14231 "in future releases; use at least %sarch=z900%s",
14232 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14233 suffix, prefix, suffix);
14235 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14237 /* Determine processor to tune for. */
14238 if (!opts_set->x_s390_tune)
14239 opts->x_s390_tune = opts->x_s390_arch;
14240 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14241 || opts->x_s390_tune == PROCESSOR_9672_G6)
14242 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14243 "in future releases; use at least %stune=z900%s",
14244 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14245 suffix, prefix, suffix);
14247 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14249 /* Sanity checks. */
14250 if (opts->x_s390_arch == PROCESSOR_NATIVE
14251 || opts->x_s390_tune == PROCESSOR_NATIVE)
14252 gcc_unreachable ();
14253 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14254 error ("z/Architecture mode not supported on %s",
14255 processor_table[(int)opts->x_s390_arch].name);
14256 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14257 error ("64-bit ABI not supported in ESA/390 mode");
14259 /* Enable hardware transactions if available and not explicitly
14260 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14261 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14263 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14264 opts->x_target_flags |= MASK_OPT_HTM;
14265 else
14266 opts->x_target_flags &= ~MASK_OPT_HTM;
14269 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14271 if (TARGET_OPT_VX_P (opts->x_target_flags))
14273 if (!TARGET_CPU_VX_P (opts))
14274 error ("hardware vector support not available on %s",
14275 processor_table[(int)opts->x_s390_arch].name);
14276 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14277 error ("hardware vector support not available with -msoft-float");
14280 else
14282 if (TARGET_CPU_VX_P (opts))
14283 /* Enable vector support if available and not explicitly disabled
14284 by user. E.g. with -m31 -march=z13 -mzarch */
14285 opts->x_target_flags |= MASK_OPT_VX;
14286 else
14287 opts->x_target_flags &= ~MASK_OPT_VX;
14290 /* Use hardware DFP if available and not explicitly disabled by
14291 user. E.g. with -m31 -march=z10 -mzarch */
14292 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14294 if (TARGET_DFP_P (opts))
14295 opts->x_target_flags |= MASK_HARD_DFP;
14296 else
14297 opts->x_target_flags &= ~MASK_HARD_DFP;
14300 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14302 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14304 if (!TARGET_CPU_DFP_P (opts))
14305 error ("hardware decimal floating point instructions"
14306 " not available on %s",
14307 processor_table[(int)opts->x_s390_arch].name);
14308 if (!TARGET_ZARCH_P (opts->x_target_flags))
14309 error ("hardware decimal floating point instructions"
14310 " not available in ESA/390 mode");
14312 else
14313 opts->x_target_flags &= ~MASK_HARD_DFP;
14316 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14317 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14319 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14320 && TARGET_HARD_DFP_P (opts->x_target_flags))
14321 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14323 opts->x_target_flags &= ~MASK_HARD_DFP;
14326 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14327 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14328 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14329 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14330 "in combination");
14332 if (opts->x_s390_stack_size)
14334 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14335 error ("stack size must be greater than the stack guard value");
14336 else if (opts->x_s390_stack_size > 1 << 16)
14337 error ("stack size must not be greater than 64k");
14339 else if (opts->x_s390_stack_guard)
14340 error ("-mstack-guard implies use of -mstack-size");
14342 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14343 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14344 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14345 #endif
14347 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14349 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14350 opts->x_param_values,
14351 opts_set->x_param_values);
14352 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14353 opts->x_param_values,
14354 opts_set->x_param_values);
14355 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14356 opts->x_param_values,
14357 opts_set->x_param_values);
14358 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14359 opts->x_param_values,
14360 opts_set->x_param_values);
14363 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14364 opts->x_param_values,
14365 opts_set->x_param_values);
14366 /* values for loop prefetching */
14367 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14368 opts->x_param_values,
14369 opts_set->x_param_values);
14370 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14371 opts->x_param_values,
14372 opts_set->x_param_values);
14373 /* s390 has more than 2 levels and the size is much larger. Since
14374 we are always running virtualized assume that we only get a small
14375 part of the caches above l1. */
14376 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14377 opts->x_param_values,
14378 opts_set->x_param_values);
14379 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14380 opts->x_param_values,
14381 opts_set->x_param_values);
14382 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14383 opts->x_param_values,
14384 opts_set->x_param_values);
14386 /* Use the alternative scheduling-pressure algorithm by default. */
14387 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14388 opts->x_param_values,
14389 opts_set->x_param_values);
14391 /* Call target specific restore function to do post-init work. At the moment,
14392 this just sets opts->x_s390_cost_pointer. */
14393 s390_function_specific_restore (opts, NULL);
14396 static void
14397 s390_option_override (void)
14399 unsigned int i;
14400 cl_deferred_option *opt;
14401 vec<cl_deferred_option> *v =
14402 (vec<cl_deferred_option> *) s390_deferred_options;
14404 if (v)
14405 FOR_EACH_VEC_ELT (*v, i, opt)
14407 switch (opt->opt_index)
14409 case OPT_mhotpatch_:
14411 int val1;
14412 int val2;
14413 char s[256];
14414 char *t;
14416 strncpy (s, opt->arg, 256);
14417 s[255] = 0;
14418 t = strchr (s, ',');
14419 if (t != NULL)
14421 *t = 0;
14422 t++;
14423 val1 = integral_argument (s);
14424 val2 = integral_argument (t);
14426 else
14428 val1 = -1;
14429 val2 = -1;
14431 if (val1 == -1 || val2 == -1)
14433 /* argument is not a plain number */
14434 error ("arguments to %qs should be non-negative integers",
14435 "-mhotpatch=n,m");
14436 break;
14438 else if (val1 > s390_hotpatch_hw_max
14439 || val2 > s390_hotpatch_hw_max)
14441 error ("argument to %qs is too large (max. %d)",
14442 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14443 break;
14445 s390_hotpatch_hw_before_label = val1;
14446 s390_hotpatch_hw_after_label = val2;
14447 break;
14449 default:
14450 gcc_unreachable ();
14454 /* Set up function hooks. */
14455 init_machine_status = s390_init_machine_status;
14457 s390_option_override_internal (true, &global_options, &global_options_set);
14459 /* Save the initial options in case the user does function specific
14460 options. */
14461 target_option_default_node = build_target_option_node (&global_options);
14462 target_option_current_node = target_option_default_node;
14464 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14465 requires the arch flags to be evaluated already. Since prefetching
14466 is beneficial on s390, we enable it if available. */
14467 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14468 flag_prefetch_loop_arrays = 1;
14470 if (TARGET_TPF)
14472 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14473 debuggers do not yet support DWARF 3/4. */
14474 if (!global_options_set.x_dwarf_strict)
14475 dwarf_strict = 1;
14476 if (!global_options_set.x_dwarf_version)
14477 dwarf_version = 2;
14480 /* Register a target-specific optimization-and-lowering pass
14481 to run immediately before prologue and epilogue generation.
14483 Registering the pass must be done at start up. It's
14484 convenient to do it here. */
14485 opt_pass *new_pass = new pass_s390_early_mach (g);
14486 struct register_pass_info insert_pass_s390_early_mach =
14488 new_pass, /* pass */
14489 "pro_and_epilogue", /* reference_pass_name */
14490 1, /* ref_pass_instance_number */
14491 PASS_POS_INSERT_BEFORE /* po_op */
14493 register_pass (&insert_pass_s390_early_mach);
14496 #if S390_USE_TARGET_ATTRIBUTE
14497 /* Inner function to process the attribute((target(...))), take an argument and
14498 set the current options from the argument. If we have a list, recursively go
14499 over the list. */
14501 static bool
14502 s390_valid_target_attribute_inner_p (tree args,
14503 struct gcc_options *opts,
14504 struct gcc_options *new_opts_set,
14505 bool force_pragma)
14507 char *next_optstr;
14508 bool ret = true;
14510 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14511 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14512 static const struct
14514 const char *string;
14515 size_t len;
14516 int opt;
14517 int has_arg;
14518 int only_as_pragma;
14519 } attrs[] = {
14520 /* enum options */
14521 S390_ATTRIB ("arch=", OPT_march_, 1),
14522 S390_ATTRIB ("tune=", OPT_mtune_, 1),
14523 /* uinteger options */
14524 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
14525 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
14526 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
14527 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
14528 /* flag options */
14529 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
14530 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
14531 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
14532 S390_ATTRIB ("htm", OPT_mhtm, 0),
14533 S390_ATTRIB ("vx", OPT_mvx, 0),
14534 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
14535 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14536 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14537 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14538 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14539 /* boolean options */
14540 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14542 #undef S390_ATTRIB
14543 #undef S390_PRAGMA
14545 /* If this is a list, recurse to get the options. */
14546 if (TREE_CODE (args) == TREE_LIST)
14548 bool ret = true;
14549 int num_pragma_values;
14550 int i;
14552 /* Note: attribs.c:decl_attributes prepends the values from
14553 current_target_pragma to the list of target attributes. To determine
14554 whether we're looking at a value of the attribute or the pragma we
14555 assume that the first [list_length (current_target_pragma)] values in
14556 the list are the values from the pragma. */
14557 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14558 ? list_length (current_target_pragma) : 0;
14559 for (i = 0; args; args = TREE_CHAIN (args), i++)
14561 bool is_pragma;
14563 is_pragma = (force_pragma || i < num_pragma_values);
14564 if (TREE_VALUE (args)
14565 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14566 opts, new_opts_set,
14567 is_pragma))
14569 ret = false;
14572 return ret;
14575 else if (TREE_CODE (args) != STRING_CST)
14577 error ("attribute %<target%> argument not a string");
14578 return false;
14581 /* Handle multiple arguments separated by commas. */
14582 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14584 while (next_optstr && *next_optstr != '\0')
14586 char *p = next_optstr;
14587 char *orig_p = p;
14588 char *comma = strchr (next_optstr, ',');
14589 size_t len, opt_len;
14590 int opt;
14591 bool opt_set_p;
14592 char ch;
14593 unsigned i;
14594 int mask = 0;
14595 enum cl_var_type var_type;
14596 bool found;
14598 if (comma)
14600 *comma = '\0';
14601 len = comma - next_optstr;
14602 next_optstr = comma + 1;
14604 else
14606 len = strlen (p);
14607 next_optstr = NULL;
14610 /* Recognize no-xxx. */
14611 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14613 opt_set_p = false;
14614 p += 3;
14615 len -= 3;
14617 else
14618 opt_set_p = true;
14620 /* Find the option. */
14621 ch = *p;
14622 found = false;
14623 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14625 opt_len = attrs[i].len;
14626 if (ch == attrs[i].string[0]
14627 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14628 && memcmp (p, attrs[i].string, opt_len) == 0)
14630 opt = attrs[i].opt;
14631 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14632 continue;
14633 mask = cl_options[opt].var_value;
14634 var_type = cl_options[opt].var_type;
14635 found = true;
14636 break;
14640 /* Process the option. */
14641 if (!found)
14643 error ("attribute(target(\"%s\")) is unknown", orig_p);
14644 return false;
14646 else if (attrs[i].only_as_pragma && !force_pragma)
14648 /* Value is not allowed for the target attribute. */
14649 error ("Value %qs is not supported by attribute %<target%>",
14650 attrs[i].string);
14651 return false;
14654 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14656 if (var_type == CLVC_BIT_CLEAR)
14657 opt_set_p = !opt_set_p;
14659 if (opt_set_p)
14660 opts->x_target_flags |= mask;
14661 else
14662 opts->x_target_flags &= ~mask;
14663 new_opts_set->x_target_flags |= mask;
14666 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14668 int value;
14670 if (cl_options[opt].cl_uinteger)
14672 /* Unsigned integer argument. Code based on the function
14673 decode_cmdline_option () in opts-common.c. */
14674 value = integral_argument (p + opt_len);
14676 else
14677 value = (opt_set_p) ? 1 : 0;
14679 if (value != -1)
14681 struct cl_decoded_option decoded;
14683 /* Value range check; only implemented for numeric and boolean
14684 options at the moment. */
14685 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14686 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14687 set_option (opts, new_opts_set, opt, value,
14688 p + opt_len, DK_UNSPECIFIED, input_location,
14689 global_dc);
14691 else
14693 error ("attribute(target(\"%s\")) is unknown", orig_p);
14694 ret = false;
14698 else if (cl_options[opt].var_type == CLVC_ENUM)
14700 bool arg_ok;
14701 int value;
14703 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14704 if (arg_ok)
14705 set_option (opts, new_opts_set, opt, value,
14706 p + opt_len, DK_UNSPECIFIED, input_location,
14707 global_dc);
14708 else
14710 error ("attribute(target(\"%s\")) is unknown", orig_p);
14711 ret = false;
14715 else
14716 gcc_unreachable ();
14718 return ret;
14721 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14723 tree
14724 s390_valid_target_attribute_tree (tree args,
14725 struct gcc_options *opts,
14726 const struct gcc_options *opts_set,
14727 bool force_pragma)
14729 tree t = NULL_TREE;
14730 struct gcc_options new_opts_set;
14732 memset (&new_opts_set, 0, sizeof (new_opts_set));
14734 /* Process each of the options on the chain. */
14735 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14736 force_pragma))
14737 return error_mark_node;
14739 /* If some option was set (even if it has not changed), rerun
14740 s390_option_override_internal, and then save the options away. */
14741 if (new_opts_set.x_target_flags
14742 || new_opts_set.x_s390_arch
14743 || new_opts_set.x_s390_tune
14744 || new_opts_set.x_s390_stack_guard
14745 || new_opts_set.x_s390_stack_size
14746 || new_opts_set.x_s390_branch_cost
14747 || new_opts_set.x_s390_warn_framesize
14748 || new_opts_set.x_s390_warn_dynamicstack_p)
14750 const unsigned char *src = (const unsigned char *)opts_set;
14751 unsigned char *dest = (unsigned char *)&new_opts_set;
14752 unsigned int i;
14754 /* Merge the original option flags into the new ones. */
14755 for (i = 0; i < sizeof(*opts_set); i++)
14756 dest[i] |= src[i];
14758 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
14759 s390_option_override_internal (false, opts, &new_opts_set);
14760 /* Save the current options unless we are validating options for
14761 #pragma. */
14762 t = build_target_option_node (opts);
14764 return t;
14767 /* Hook to validate attribute((target("string"))). */
14769 static bool
14770 s390_valid_target_attribute_p (tree fndecl,
14771 tree ARG_UNUSED (name),
14772 tree args,
14773 int ARG_UNUSED (flags))
14775 struct gcc_options func_options;
14776 tree new_target, new_optimize;
14777 bool ret = true;
14779 /* attribute((target("default"))) does nothing, beyond
14780 affecting multi-versioning. */
14781 if (TREE_VALUE (args)
14782 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
14783 && TREE_CHAIN (args) == NULL_TREE
14784 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
14785 return true;
14787 tree old_optimize = build_optimization_node (&global_options);
14789 /* Get the optimization options of the current function. */
14790 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
14792 if (!func_optimize)
14793 func_optimize = old_optimize;
14795 /* Init func_options. */
14796 memset (&func_options, 0, sizeof (func_options));
14797 init_options_struct (&func_options, NULL);
14798 lang_hooks.init_options_struct (&func_options);
14800 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
14802 /* Initialize func_options to the default before its target options can
14803 be set. */
14804 cl_target_option_restore (&func_options,
14805 TREE_TARGET_OPTION (target_option_default_node));
14807 new_target = s390_valid_target_attribute_tree (args, &func_options,
14808 &global_options_set,
14809 (args ==
14810 current_target_pragma));
14811 new_optimize = build_optimization_node (&func_options);
14812 if (new_target == error_mark_node)
14813 ret = false;
14814 else if (fndecl && new_target)
14816 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
14817 if (old_optimize != new_optimize)
14818 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
14820 return ret;
14823 /* Restore targets globals from NEW_TREE and invalidate s390_previous_fndecl
14824 cache. */
14826 void
14827 s390_activate_target_options (tree new_tree)
14829 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
14830 if (TREE_TARGET_GLOBALS (new_tree))
14831 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
14832 else if (new_tree == target_option_default_node)
14833 restore_target_globals (&default_target_globals);
14834 else
14835 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
14836 s390_previous_fndecl = NULL_TREE;
14839 /* Establish appropriate back-end context for processing the function
14840 FNDECL. The argument might be NULL to indicate processing at top
14841 level, outside of any function scope. */
14842 static void
14843 s390_set_current_function (tree fndecl)
14845 /* Only change the context if the function changes. This hook is called
14846 several times in the course of compiling a function, and we don't want to
14847 slow things down too much or call target_reinit when it isn't safe. */
14848 if (fndecl == s390_previous_fndecl)
14849 return;
14851 tree old_tree;
14852 if (s390_previous_fndecl == NULL_TREE)
14853 old_tree = target_option_current_node;
14854 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
14855 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
14856 else
14857 old_tree = target_option_default_node;
14859 if (fndecl == NULL_TREE)
14861 if (old_tree != target_option_current_node)
14862 s390_activate_target_options (target_option_current_node);
14863 return;
14866 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
14867 if (new_tree == NULL_TREE)
14868 new_tree = target_option_default_node;
14870 if (old_tree != new_tree)
14871 s390_activate_target_options (new_tree);
14872 s390_previous_fndecl = fndecl;
14874 #endif
14876 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
14878 static bool
14879 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
14880 unsigned int align ATTRIBUTE_UNUSED,
14881 enum by_pieces_operation op ATTRIBUTE_UNUSED,
14882 bool speed_p ATTRIBUTE_UNUSED)
14884 return (size == 1 || size == 2
14885 || size == 4 || (TARGET_ZARCH && size == 8));
14888 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
14890 static void
14891 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
14893 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
14894 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
14895 tree call_efpc = build_call_expr (efpc, 0);
14896 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
14898 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
14899 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
14900 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
14901 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
14902 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
14903 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
14905 /* Generates the equivalent of feholdexcept (&fenv_var)
14907 fenv_var = __builtin_s390_efpc ();
14908 __builtin_s390_sfpc (fenv_var & mask) */
14909 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
14910 tree new_fpc =
14911 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
14912 build_int_cst (unsigned_type_node,
14913 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
14914 FPC_EXCEPTION_MASK)));
14915 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
14916 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
14918 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
14920 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
14921 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
14922 build_int_cst (unsigned_type_node,
14923 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
14924 *clear = build_call_expr (sfpc, 1, new_fpc);
14926 /* Generates the equivalent of feupdateenv (fenv_var)
14928 old_fpc = __builtin_s390_efpc ();
14929 __builtin_s390_sfpc (fenv_var);
14930 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
14932 old_fpc = create_tmp_var_raw (unsigned_type_node);
14933 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
14934 old_fpc, call_efpc);
14936 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
14938 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
14939 build_int_cst (unsigned_type_node,
14940 FPC_FLAGS_MASK));
14941 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
14942 build_int_cst (unsigned_type_node,
14943 FPC_FLAGS_SHIFT));
14944 tree atomic_feraiseexcept
14945 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
14946 raise_old_except = build_call_expr (atomic_feraiseexcept,
14947 1, raise_old_except);
14949 *update = build2 (COMPOUND_EXPR, void_type_node,
14950 build2 (COMPOUND_EXPR, void_type_node,
14951 store_old_fpc, set_new_fpc),
14952 raise_old_except);
14954 #undef FPC_EXCEPTION_MASK
14955 #undef FPC_FLAGS_MASK
14956 #undef FPC_DXC_MASK
14957 #undef FPC_EXCEPTION_MASK_SHIFT
14958 #undef FPC_FLAGS_SHIFT
14959 #undef FPC_DXC_SHIFT
14962 /* Return the vector mode to be used for inner mode MODE when doing
14963 vectorization. */
14964 static machine_mode
14965 s390_preferred_simd_mode (machine_mode mode)
14967 if (TARGET_VX)
14968 switch (mode)
14970 case DFmode:
14971 return V2DFmode;
14972 case DImode:
14973 return V2DImode;
14974 case SImode:
14975 return V4SImode;
14976 case HImode:
14977 return V8HImode;
14978 case QImode:
14979 return V16QImode;
14980 default:;
14982 return word_mode;
14985 /* Our hardware does not require vectors to be strictly aligned. */
14986 static bool
14987 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
14988 const_tree type ATTRIBUTE_UNUSED,
14989 int misalignment ATTRIBUTE_UNUSED,
14990 bool is_packed ATTRIBUTE_UNUSED)
14992 if (TARGET_VX)
14993 return true;
14995 return default_builtin_support_vector_misalignment (mode, type, misalignment,
14996 is_packed);
14999 /* The vector ABI requires vector types to be aligned on an 8 byte
15000 boundary (our stack alignment). However, we allow this to be
15001 overriden by the user, while this definitely breaks the ABI. */
15002 static HOST_WIDE_INT
15003 s390_vector_alignment (const_tree type)
15005 if (!TARGET_VX_ABI)
15006 return default_vector_alignment (type);
15008 if (TYPE_USER_ALIGN (type))
15009 return TYPE_ALIGN (type);
15011 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
15014 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15015 /* Implement TARGET_ASM_FILE_START. */
15016 static void
15017 s390_asm_file_start (void)
15019 default_file_start ();
15020 s390_asm_output_machine_for_arch (asm_out_file);
15022 #endif
15024 /* Implement TARGET_ASM_FILE_END. */
15025 static void
15026 s390_asm_file_end (void)
15028 #ifdef HAVE_AS_GNU_ATTRIBUTE
15029 varpool_node *vnode;
15030 cgraph_node *cnode;
15032 FOR_EACH_VARIABLE (vnode)
15033 if (TREE_PUBLIC (vnode->decl))
15034 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15036 FOR_EACH_FUNCTION (cnode)
15037 if (TREE_PUBLIC (cnode->decl))
15038 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15041 if (s390_vector_abi != 0)
15042 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15043 s390_vector_abi);
15044 #endif
15045 file_end_indicate_exec_stack ();
15047 if (flag_split_stack)
15048 file_end_indicate_split_stack ();
15051 /* Return true if TYPE is a vector bool type. */
15052 static inline bool
15053 s390_vector_bool_type_p (const_tree type)
15055 return TYPE_VECTOR_OPAQUE (type);
15058 /* Return the diagnostic message string if the binary operation OP is
15059 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15060 static const char*
15061 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15063 bool bool1_p, bool2_p;
15064 bool plusminus_p;
15065 bool muldiv_p;
15066 bool compare_p;
15067 machine_mode mode1, mode2;
15069 if (!TARGET_ZVECTOR)
15070 return NULL;
15072 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15073 return NULL;
15075 bool1_p = s390_vector_bool_type_p (type1);
15076 bool2_p = s390_vector_bool_type_p (type2);
15078 /* Mixing signed and unsigned types is forbidden for all
15079 operators. */
15080 if (!bool1_p && !bool2_p
15081 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15082 return N_("types differ in signess");
15084 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15085 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15086 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15087 || op == ROUND_DIV_EXPR);
15088 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15089 || op == EQ_EXPR || op == NE_EXPR);
15091 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15092 return N_("binary operator does not support two vector bool operands");
15094 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15095 return N_("binary operator does not support vector bool operand");
15097 mode1 = TYPE_MODE (type1);
15098 mode2 = TYPE_MODE (type2);
15100 if (bool1_p != bool2_p && plusminus_p
15101 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15102 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15103 return N_("binary operator does not support mixing vector "
15104 "bool with floating point vector operands");
15106 return NULL;
15109 /* Initialize GCC target structure. */
15111 #undef TARGET_ASM_ALIGNED_HI_OP
15112 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15113 #undef TARGET_ASM_ALIGNED_DI_OP
15114 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15115 #undef TARGET_ASM_INTEGER
15116 #define TARGET_ASM_INTEGER s390_assemble_integer
15118 #undef TARGET_ASM_OPEN_PAREN
15119 #define TARGET_ASM_OPEN_PAREN ""
15121 #undef TARGET_ASM_CLOSE_PAREN
15122 #define TARGET_ASM_CLOSE_PAREN ""
15124 #undef TARGET_OPTION_OVERRIDE
15125 #define TARGET_OPTION_OVERRIDE s390_option_override
15127 #undef TARGET_ENCODE_SECTION_INFO
15128 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15130 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15131 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15133 #ifdef HAVE_AS_TLS
15134 #undef TARGET_HAVE_TLS
15135 #define TARGET_HAVE_TLS true
15136 #endif
15137 #undef TARGET_CANNOT_FORCE_CONST_MEM
15138 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
15140 #undef TARGET_DELEGITIMIZE_ADDRESS
15141 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
15143 #undef TARGET_LEGITIMIZE_ADDRESS
15144 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
15146 #undef TARGET_RETURN_IN_MEMORY
15147 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
15149 #undef TARGET_INIT_BUILTINS
15150 #define TARGET_INIT_BUILTINS s390_init_builtins
15151 #undef TARGET_EXPAND_BUILTIN
15152 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
15153 #undef TARGET_BUILTIN_DECL
15154 #define TARGET_BUILTIN_DECL s390_builtin_decl
15156 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
15157 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
15159 #undef TARGET_ASM_OUTPUT_MI_THUNK
15160 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
15161 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
15162 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
15164 #undef TARGET_SCHED_ADJUST_PRIORITY
15165 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
15166 #undef TARGET_SCHED_ISSUE_RATE
15167 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
15168 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
15169 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
15171 #undef TARGET_SCHED_VARIABLE_ISSUE
15172 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
15173 #undef TARGET_SCHED_REORDER
15174 #define TARGET_SCHED_REORDER s390_sched_reorder
15175 #undef TARGET_SCHED_INIT
15176 #define TARGET_SCHED_INIT s390_sched_init
15178 #undef TARGET_CANNOT_COPY_INSN_P
15179 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
15180 #undef TARGET_RTX_COSTS
15181 #define TARGET_RTX_COSTS s390_rtx_costs
15182 #undef TARGET_ADDRESS_COST
15183 #define TARGET_ADDRESS_COST s390_address_cost
15184 #undef TARGET_REGISTER_MOVE_COST
15185 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
15186 #undef TARGET_MEMORY_MOVE_COST
15187 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
15189 #undef TARGET_MACHINE_DEPENDENT_REORG
15190 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
15192 #undef TARGET_VALID_POINTER_MODE
15193 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
15195 #undef TARGET_BUILD_BUILTIN_VA_LIST
15196 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
15197 #undef TARGET_EXPAND_BUILTIN_VA_START
15198 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
15199 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
15200 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
15202 #undef TARGET_PROMOTE_FUNCTION_MODE
15203 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
15204 #undef TARGET_PASS_BY_REFERENCE
15205 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
15207 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
15208 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
15209 #undef TARGET_FUNCTION_ARG
15210 #define TARGET_FUNCTION_ARG s390_function_arg
15211 #undef TARGET_FUNCTION_ARG_ADVANCE
15212 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
15213 #undef TARGET_FUNCTION_VALUE
15214 #define TARGET_FUNCTION_VALUE s390_function_value
15215 #undef TARGET_LIBCALL_VALUE
15216 #define TARGET_LIBCALL_VALUE s390_libcall_value
15217 #undef TARGET_STRICT_ARGUMENT_NAMING
15218 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
15220 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
15221 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
15223 #undef TARGET_FIXED_CONDITION_CODE_REGS
15224 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
15226 #undef TARGET_CC_MODES_COMPATIBLE
15227 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
15229 #undef TARGET_INVALID_WITHIN_DOLOOP
15230 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
15232 #ifdef HAVE_AS_TLS
15233 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
15234 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
15235 #endif
15237 #undef TARGET_DWARF_FRAME_REG_MODE
15238 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
15240 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
15241 #undef TARGET_MANGLE_TYPE
15242 #define TARGET_MANGLE_TYPE s390_mangle_type
15243 #endif
15245 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15246 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15248 #undef TARGET_VECTOR_MODE_SUPPORTED_P
15249 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
15251 #undef TARGET_PREFERRED_RELOAD_CLASS
15252 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
15254 #undef TARGET_SECONDARY_RELOAD
15255 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
15257 #undef TARGET_LIBGCC_CMP_RETURN_MODE
15258 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
15260 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
15261 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
15263 #undef TARGET_LEGITIMATE_ADDRESS_P
15264 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
15266 #undef TARGET_LEGITIMATE_CONSTANT_P
15267 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
15269 #undef TARGET_LRA_P
15270 #define TARGET_LRA_P s390_lra_p
15272 #undef TARGET_CAN_ELIMINATE
15273 #define TARGET_CAN_ELIMINATE s390_can_eliminate
15275 #undef TARGET_CONDITIONAL_REGISTER_USAGE
15276 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
15278 #undef TARGET_LOOP_UNROLL_ADJUST
15279 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
15281 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
15282 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
15283 #undef TARGET_TRAMPOLINE_INIT
15284 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
15286 #undef TARGET_UNWIND_WORD_MODE
15287 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
15289 #undef TARGET_CANONICALIZE_COMPARISON
15290 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
15292 #undef TARGET_HARD_REGNO_SCRATCH_OK
15293 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
15295 #undef TARGET_ATTRIBUTE_TABLE
15296 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
15298 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
15299 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
15301 #undef TARGET_SET_UP_BY_PROLOGUE
15302 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
15304 #undef TARGET_EXTRA_LIVE_ON_ENTRY
15305 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
15307 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
15308 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
15309 s390_use_by_pieces_infrastructure_p
15311 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
15312 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
15314 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
15315 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
15317 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
15318 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
15320 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
15321 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
15323 #undef TARGET_VECTOR_ALIGNMENT
15324 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
15326 #undef TARGET_INVALID_BINARY_OP
15327 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
15329 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15330 #undef TARGET_ASM_FILE_START
15331 #define TARGET_ASM_FILE_START s390_asm_file_start
15332 #endif
15334 #undef TARGET_ASM_FILE_END
15335 #define TARGET_ASM_FILE_END s390_asm_file_end
15337 #if S390_USE_TARGET_ATTRIBUTE
15338 #undef TARGET_SET_CURRENT_FUNCTION
15339 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
15341 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
15342 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
15343 #endif
15345 #undef TARGET_OPTION_RESTORE
15346 #define TARGET_OPTION_RESTORE s390_function_specific_restore
15348 struct gcc_target targetm = TARGET_INITIALIZER;
15350 #include "gt-s390.h"