1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2018 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #define IN_TARGET_CODE 1
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "target.h"
30 #include "target-globals.h"
31 #include "rtl.h"
32 #include "tree.h"
33 #include "gimple.h"
34 #include "cfghooks.h"
35 #include "cfgloop.h"
36 #include "df.h"
37 #include "memmodel.h"
38 #include "tm_p.h"
39 #include "stringpool.h"
40 #include "attribs.h"
41 #include "expmed.h"
42 #include "optabs.h"
43 #include "regs.h"
44 #include "emit-rtl.h"
45 #include "recog.h"
46 #include "cgraph.h"
47 #include "diagnostic-core.h"
48 #include "diagnostic.h"
49 #include "alias.h"
50 #include "fold-const.h"
51 #include "print-tree.h"
52 #include "stor-layout.h"
53 #include "varasm.h"
54 #include "calls.h"
55 #include "conditions.h"
56 #include "output.h"
57 #include "insn-attr.h"
58 #include "flags.h"
59 #include "except.h"
60 #include "dojump.h"
61 #include "explow.h"
62 #include "stmt.h"
63 #include "expr.h"
64 #include "reload.h"
65 #include "cfgrtl.h"
66 #include "cfganal.h"
67 #include "lcm.h"
68 #include "cfgbuild.h"
69 #include "cfgcleanup.h"
70 #include "debug.h"
71 #include "langhooks.h"
72 #include "internal-fn.h"
73 #include "gimple-fold.h"
74 #include "tree-eh.h"
75 #include "gimplify.h"
76 #include "params.h"
77 #include "opts.h"
78 #include "tree-pass.h"
79 #include "context.h"
80 #include "builtins.h"
81 #include "rtl-iter.h"
82 #include "intl.h"
83 #include "tm-constrs.h"
84 #include "tree-vrp.h"
85 #include "symbol-summary.h"
86 #include "ipa-prop.h"
87 #include "ipa-fnsummary.h"
88 #include "sched-int.h"
90 /* This file should be included last. */
91 #include "target-def.h"
93 static bool s390_hard_regno_mode_ok (unsigned int, machine_mode);
95 /* Remember the last target of s390_set_current_function. */
96 static GTY(()) tree s390_previous_fndecl;
98 /* Define the specific costs for a given cpu. */
100 struct processor_costs
102 /* multiplication */
103 const int m; /* cost of an M instruction. */
104 const int mghi; /* cost of an MGHI instruction. */
105 const int mh; /* cost of an MH instruction. */
106 const int mhi; /* cost of an MHI instruction. */
107 const int ml; /* cost of an ML instruction. */
108 const int mr; /* cost of an MR instruction. */
109 const int ms; /* cost of an MS instruction. */
110 const int msg; /* cost of an MSG instruction. */
111 const int msgf; /* cost of an MSGF instruction. */
112 const int msgfr; /* cost of an MSGFR instruction. */
113 const int msgr; /* cost of an MSGR instruction. */
114 const int msr; /* cost of an MSR instruction. */
115 const int mult_df; /* cost of multiplication in DFmode. */
116 const int mxbr; /* cost of multiplication in TFmode. */
117 /* square root */
118 const int sqxbr; /* cost of square root in TFmode. */
119 const int sqdbr; /* cost of square root in DFmode. */
120 const int sqebr; /* cost of square root in SFmode. */
121 /* multiply and add */
122 const int madbr; /* cost of multiply and add in DFmode. */
123 const int maebr; /* cost of multiply and add in SFmode. */
124 /* division */
125 const int dxbr; /* cost of division in TFmode. */
126 const int ddbr; /* cost of division in DFmode. */
127 const int debr; /* cost of division in SFmode. */
128 const int dlgr;
129 const int dlr;
130 const int dr;
131 const int dsgfr;
132 const int dsgr;
135 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
137 static const
138 struct processor_costs z900_cost =
140 COSTS_N_INSNS (5), /* M */
141 COSTS_N_INSNS (10), /* MGHI */
142 COSTS_N_INSNS (5), /* MH */
143 COSTS_N_INSNS (4), /* MHI */
144 COSTS_N_INSNS (5), /* ML */
145 COSTS_N_INSNS (5), /* MR */
146 COSTS_N_INSNS (4), /* MS */
147 COSTS_N_INSNS (15), /* MSG */
148 COSTS_N_INSNS (7), /* MSGF */
149 COSTS_N_INSNS (7), /* MSGFR */
150 COSTS_N_INSNS (10), /* MSGR */
151 COSTS_N_INSNS (4), /* MSR */
152 COSTS_N_INSNS (7), /* multiplication in DFmode */
153 COSTS_N_INSNS (13), /* MXBR */
154 COSTS_N_INSNS (136), /* SQXBR */
155 COSTS_N_INSNS (44), /* SQDBR */
156 COSTS_N_INSNS (35), /* SQEBR */
157 COSTS_N_INSNS (18), /* MADBR */
158 COSTS_N_INSNS (13), /* MAEBR */
159 COSTS_N_INSNS (134), /* DXBR */
160 COSTS_N_INSNS (30), /* DDBR */
161 COSTS_N_INSNS (27), /* DEBR */
162 COSTS_N_INSNS (220), /* DLGR */
163 COSTS_N_INSNS (34), /* DLR */
164 COSTS_N_INSNS (34), /* DR */
165 COSTS_N_INSNS (32), /* DSGFR */
166 COSTS_N_INSNS (32), /* DSGR */
169 static const
170 struct processor_costs z990_cost =
172 COSTS_N_INSNS (4), /* M */
173 COSTS_N_INSNS (2), /* MGHI */
174 COSTS_N_INSNS (2), /* MH */
175 COSTS_N_INSNS (2), /* MHI */
176 COSTS_N_INSNS (4), /* ML */
177 COSTS_N_INSNS (4), /* MR */
178 COSTS_N_INSNS (5), /* MS */
179 COSTS_N_INSNS (6), /* MSG */
180 COSTS_N_INSNS (4), /* MSGF */
181 COSTS_N_INSNS (4), /* MSGFR */
182 COSTS_N_INSNS (4), /* MSGR */
183 COSTS_N_INSNS (4), /* MSR */
184 COSTS_N_INSNS (1), /* multiplication in DFmode */
185 COSTS_N_INSNS (28), /* MXBR */
186 COSTS_N_INSNS (130), /* SQXBR */
187 COSTS_N_INSNS (66), /* SQDBR */
188 COSTS_N_INSNS (38), /* SQEBR */
189 COSTS_N_INSNS (1), /* MADBR */
190 COSTS_N_INSNS (1), /* MAEBR */
191 COSTS_N_INSNS (60), /* DXBR */
192 COSTS_N_INSNS (40), /* DDBR */
193 COSTS_N_INSNS (26), /* DEBR */
194 COSTS_N_INSNS (176), /* DLGR */
195 COSTS_N_INSNS (31), /* DLR */
196 COSTS_N_INSNS (31), /* DR */
197 COSTS_N_INSNS (31), /* DSGFR */
198 COSTS_N_INSNS (31), /* DSGR */
201 static const
202 struct processor_costs z9_109_cost =
204 COSTS_N_INSNS (4), /* M */
205 COSTS_N_INSNS (2), /* MGHI */
206 COSTS_N_INSNS (2), /* MH */
207 COSTS_N_INSNS (2), /* MHI */
208 COSTS_N_INSNS (4), /* ML */
209 COSTS_N_INSNS (4), /* MR */
210 COSTS_N_INSNS (5), /* MS */
211 COSTS_N_INSNS (6), /* MSG */
212 COSTS_N_INSNS (4), /* MSGF */
213 COSTS_N_INSNS (4), /* MSGFR */
214 COSTS_N_INSNS (4), /* MSGR */
215 COSTS_N_INSNS (4), /* MSR */
216 COSTS_N_INSNS (1), /* multiplication in DFmode */
217 COSTS_N_INSNS (28), /* MXBR */
218 COSTS_N_INSNS (130), /* SQXBR */
219 COSTS_N_INSNS (66), /* SQDBR */
220 COSTS_N_INSNS (38), /* SQEBR */
221 COSTS_N_INSNS (1), /* MADBR */
222 COSTS_N_INSNS (1), /* MAEBR */
223 COSTS_N_INSNS (60), /* DXBR */
224 COSTS_N_INSNS (40), /* DDBR */
225 COSTS_N_INSNS (26), /* DEBR */
226 COSTS_N_INSNS (30), /* DLGR */
227 COSTS_N_INSNS (23), /* DLR */
228 COSTS_N_INSNS (23), /* DR */
229 COSTS_N_INSNS (24), /* DSGFR */
230 COSTS_N_INSNS (24), /* DSGR */
233 static const
234 struct processor_costs z10_cost =
236 COSTS_N_INSNS (10), /* M */
237 COSTS_N_INSNS (10), /* MGHI */
238 COSTS_N_INSNS (10), /* MH */
239 COSTS_N_INSNS (10), /* MHI */
240 COSTS_N_INSNS (10), /* ML */
241 COSTS_N_INSNS (10), /* MR */
242 COSTS_N_INSNS (10), /* MS */
243 COSTS_N_INSNS (10), /* MSG */
244 COSTS_N_INSNS (10), /* MSGF */
245 COSTS_N_INSNS (10), /* MSGFR */
246 COSTS_N_INSNS (10), /* MSGR */
247 COSTS_N_INSNS (10), /* MSR */
248 COSTS_N_INSNS (1) , /* multiplication in DFmode */
249 COSTS_N_INSNS (50), /* MXBR */
250 COSTS_N_INSNS (120), /* SQXBR */
251 COSTS_N_INSNS (52), /* SQDBR */
252 COSTS_N_INSNS (38), /* SQEBR */
253 COSTS_N_INSNS (1), /* MADBR */
254 COSTS_N_INSNS (1), /* MAEBR */
255 COSTS_N_INSNS (111), /* DXBR */
256 COSTS_N_INSNS (39), /* DDBR */
257 COSTS_N_INSNS (32), /* DEBR */
258 COSTS_N_INSNS (160), /* DLGR */
259 COSTS_N_INSNS (71), /* DLR */
260 COSTS_N_INSNS (71), /* DR */
261 COSTS_N_INSNS (71), /* DSGFR */
262 COSTS_N_INSNS (71), /* DSGR */
265 static const
266 struct processor_costs z196_cost =
268 COSTS_N_INSNS (7), /* M */
269 COSTS_N_INSNS (5), /* MGHI */
270 COSTS_N_INSNS (5), /* MH */
271 COSTS_N_INSNS (5), /* MHI */
272 COSTS_N_INSNS (7), /* ML */
273 COSTS_N_INSNS (7), /* MR */
274 COSTS_N_INSNS (6), /* MS */
275 COSTS_N_INSNS (8), /* MSG */
276 COSTS_N_INSNS (6), /* MSGF */
277 COSTS_N_INSNS (6), /* MSGFR */
278 COSTS_N_INSNS (8), /* MSGR */
279 COSTS_N_INSNS (6), /* MSR */
280 COSTS_N_INSNS (1) , /* multiplication in DFmode */
281 COSTS_N_INSNS (40), /* MXBR B+40 */
282 COSTS_N_INSNS (100), /* SQXBR B+100 */
283 COSTS_N_INSNS (42), /* SQDBR B+42 */
284 COSTS_N_INSNS (28), /* SQEBR B+28 */
285 COSTS_N_INSNS (1), /* MADBR B */
286 COSTS_N_INSNS (1), /* MAEBR B */
287 COSTS_N_INSNS (101), /* DXBR B+101 */
288 COSTS_N_INSNS (29), /* DDBR */
289 COSTS_N_INSNS (22), /* DEBR */
290 COSTS_N_INSNS (160), /* DLGR cracked */
291 COSTS_N_INSNS (160), /* DLR cracked */
292 COSTS_N_INSNS (160), /* DR expanded */
293 COSTS_N_INSNS (160), /* DSGFR cracked */
294 COSTS_N_INSNS (160), /* DSGR cracked */
297 static const
298 struct processor_costs zEC12_cost =
300 COSTS_N_INSNS (7), /* M */
301 COSTS_N_INSNS (5), /* MGHI */
302 COSTS_N_INSNS (5), /* MH */
303 COSTS_N_INSNS (5), /* MHI */
304 COSTS_N_INSNS (7), /* ML */
305 COSTS_N_INSNS (7), /* MR */
306 COSTS_N_INSNS (6), /* MS */
307 COSTS_N_INSNS (8), /* MSG */
308 COSTS_N_INSNS (6), /* MSGF */
309 COSTS_N_INSNS (6), /* MSGFR */
310 COSTS_N_INSNS (8), /* MSGR */
311 COSTS_N_INSNS (6), /* MSR */
312 COSTS_N_INSNS (1) , /* multiplication in DFmode */
313 COSTS_N_INSNS (40), /* MXBR B+40 */
314 COSTS_N_INSNS (100), /* SQXBR B+100 */
315 COSTS_N_INSNS (42), /* SQDBR B+42 */
316 COSTS_N_INSNS (28), /* SQEBR B+28 */
317 COSTS_N_INSNS (1), /* MADBR B */
318 COSTS_N_INSNS (1), /* MAEBR B */
319 COSTS_N_INSNS (131), /* DXBR B+131 */
320 COSTS_N_INSNS (29), /* DDBR */
321 COSTS_N_INSNS (22), /* DEBR */
322 COSTS_N_INSNS (160), /* DLGR cracked */
323 COSTS_N_INSNS (160), /* DLR cracked */
324 COSTS_N_INSNS (160), /* DR expanded */
325 COSTS_N_INSNS (160), /* DSGFR cracked */
326 COSTS_N_INSNS (160), /* DSGR cracked */
329 static struct
331 /* The preferred name to be used in user visible output. */
332 const char *const name;
333 /* CPU name as it should be passed to Binutils via .machine */
334 const char *const binutils_name;
335 const enum processor_type processor;
336 const struct processor_costs *cost;
338 const processor_table[] =
340 { "g5", "g5", PROCESSOR_9672_G5, &z900_cost },
341 { "g6", "g6", PROCESSOR_9672_G6, &z900_cost },
342 { "z900", "z900", PROCESSOR_2064_Z900, &z900_cost },
343 { "z990", "z990", PROCESSOR_2084_Z990, &z990_cost },
344 { "z9-109", "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
345 { "z9-ec", "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
346 { "z10", "z10", PROCESSOR_2097_Z10, &z10_cost },
347 { "z196", "z196", PROCESSOR_2817_Z196, &z196_cost },
348 { "zEC12", "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
349 { "z13", "z13", PROCESSOR_2964_Z13, &zEC12_cost },
350 { "z14", "arch12", PROCESSOR_3906_Z14, &zEC12_cost },
351 { "native", "", PROCESSOR_NATIVE, NULL }
354 extern int reload_completed;
356 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
357 static rtx_insn *last_scheduled_insn;
358 #define MAX_SCHED_UNITS 3
359 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
361 #define NUM_SIDES 2
362 static int current_side = 1;
363 #define LONGRUNNING_THRESHOLD 5
365 /* Estimate of number of cycles a long-running insn occupies an
366 execution unit. */
367 static unsigned fxu_longrunning[NUM_SIDES];
368 static unsigned vfu_longrunning[NUM_SIDES];
370 /* Factor to scale latencies by, determined by measurements. */
371 #define LATENCY_FACTOR 4
373 /* The maximum score added for an instruction whose unit hasn't been
374 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
375 give instruction mix scheduling more priority over instruction
376 grouping. */
377 #define MAX_SCHED_MIX_SCORE 8
379 /* The maximum distance up to which individual scores will be
380 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
381 Increase this with the OOO window size of the machine. */
382 #define MAX_SCHED_MIX_DISTANCE 100
384 /* Structure used to hold the components of an S/390 memory
385 address. A legitimate address on S/390 is of the general
386 form
387 base + index + displacement
388 where any of the components is optional.
390 base and index are registers of the class ADDR_REGS,
391 displacement is an unsigned 12-bit immediate constant. */
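/* Illustrative example (editorial addition, not from the original
   sources): in the assembler operand 200(%r2,%r3) of an RX-format
   instruction such as "l %r1,200(%r2,%r3)", the base is %r3, the
   index is %r2 and the displacement is 200. */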
393 struct s390_address
395 rtx base;
396 rtx indx;
397 rtx disp;
398 bool pointer;
399 bool literal_pool;
402 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
404 #define cfun_frame_layout (cfun->machine->frame_layout)
405 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
406 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
407 ? cfun_frame_layout.fpr_bitmap & 0x0f \
408 : cfun_frame_layout.fpr_bitmap & 0x03))
409 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
410 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
411 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
412 (1 << (REGNO - FPR0_REGNUM)))
413 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
414 (1 << (REGNO - FPR0_REGNUM))))
415 #define cfun_gpr_save_slot(REGNO) \
416 cfun->machine->frame_layout.gpr_save_slots[REGNO]
418 /* Number of GPRs and FPRs used for argument passing. */
419 #define GP_ARG_NUM_REG 5
420 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
421 #define VEC_ARG_NUM_REG 8
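/* For reference (an editorial note, based on the s390 ELF ABI): the
   GPR arguments are passed in %r2..%r6, the FPR arguments in
   %f0/%f2/%f4/%f6 on 64 bit (%f0/%f2 on 31 bit), and vector
   arguments in %v24..%v31. */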
423 /* A couple of shortcuts. */
424 #define CONST_OK_FOR_J(x) \
425 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
426 #define CONST_OK_FOR_K(x) \
427 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
428 #define CONST_OK_FOR_Os(x) \
429 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
430 #define CONST_OK_FOR_Op(x) \
431 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
432 #define CONST_OK_FOR_On(x) \
433 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
435 #define REGNO_PAIR_OK(REGNO, MODE) \
436 (s390_hard_regno_nregs ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
438 /* That's the read ahead of the dynamic branch prediction unit in
439 bytes on a z10 (or higher) CPU. */
440 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
442 /* Masks per jump target register indicating which thunks need to be
443 generated. */
444 static GTY(()) int indirect_branch_prez10thunk_mask = 0;
445 static GTY(()) int indirect_branch_z10thunk_mask = 0;
447 #define INDIRECT_BRANCH_NUM_OPTIONS 4
449 enum s390_indirect_branch_option
451 s390_opt_indirect_branch_jump = 0,
452 s390_opt_indirect_branch_call,
453 s390_opt_function_return_reg,
454 s390_opt_function_return_mem
457 static GTY(()) int indirect_branch_table_label_no[INDIRECT_BRANCH_NUM_OPTIONS] = { 0 };
458 const char *indirect_branch_table_label[INDIRECT_BRANCH_NUM_OPTIONS] = \
459 { "LJUMP", "LCALL", "LRETREG", "LRETMEM" };
460 const char *indirect_branch_table_name[INDIRECT_BRANCH_NUM_OPTIONS] = \
461 { ".s390_indirect_jump", ".s390_indirect_call",
462 ".s390_return_reg", ".s390_return_mem" };
464 bool
465 s390_return_addr_from_memory ()
467 return cfun_gpr_save_slot(RETURN_REGNUM) == SAVE_SLOT_STACK;
470 /* Indicate which ABI has been used for passing vector args.
471 0 - no vector type arguments have been passed where the ABI is relevant
472 1 - the old ABI has been used
473 2 - a vector type argument has been passed either in a vector register
474 or on the stack by value */
475 static int s390_vector_abi = 0;
477 /* Set the vector ABI marker if TYPE is subject to the vector ABI
478 switch. The vector ABI affects only vector data types. There are
479 two aspects of the vector ABI relevant here:
481 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
482 ABI and natural alignment with the old.
484 2. vectors <= 16 bytes are passed in VRs or by value on the stack
485 with the new ABI but by reference on the stack with the old.
487 If ARG_P is true TYPE is used for a function argument or return
488 value. The ABI marker then is set for all vector data types. If
489 ARG_P is false only type 1 vectors are being checked. */
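/* Illustrative example (editorial addition): given
     typedef double v2df __attribute__ ((vector_size (16)));
   a v2df function argument is passed in a vector register (or by
   value on the stack) with the new ABI, but by reference on the
   stack with the old one, so such an argument sets the ABI marker
   maintained below. */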
491 static void
492 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
494 static hash_set<const_tree> visited_types_hash;
496 if (s390_vector_abi)
497 return;
499 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
500 return;
502 if (visited_types_hash.contains (type))
503 return;
505 visited_types_hash.add (type);
507 if (VECTOR_TYPE_P (type))
509 int type_size = int_size_in_bytes (type);
511 /* Outside of arguments only the alignment changes, and this
512 only happens for vector types >= 16 bytes. */
513 if (!arg_p && type_size < 16)
514 return;
516 /* In arguments vector types > 16 bytes are passed as before (GCC
517 never enforced the bigger alignment for arguments which was
518 required by the old vector ABI). However, it might still be
519 ABI relevant due to the changed alignment if it is a struct
520 member. */
521 if (arg_p && type_size > 16 && !in_struct_p)
522 return;
524 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
526 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
528 /* ARRAY_TYPE: Since neither of the ABIs imposes more than
529 natural alignment, there will never be ABI-dependent padding
530 in an array type. That's why we do not set in_struct_p to
531 true here. */
532 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
534 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
536 tree arg_chain;
538 /* Check the return type. */
539 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
541 for (arg_chain = TYPE_ARG_TYPES (type);
542 arg_chain;
543 arg_chain = TREE_CHAIN (arg_chain))
544 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
546 else if (RECORD_OR_UNION_TYPE_P (type))
548 tree field;
550 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
552 if (TREE_CODE (field) != FIELD_DECL)
553 continue;
555 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
561 /* System z builtins. */
563 #include "s390-builtins.h"
565 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
567 #undef B_DEF
568 #undef OB_DEF
569 #undef OB_DEF_VAR
570 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
571 #define OB_DEF(...)
572 #define OB_DEF_VAR(...)
573 #include "s390-builtins.def"
577 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
579 #undef B_DEF
580 #undef OB_DEF
581 #undef OB_DEF_VAR
582 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
583 #define OB_DEF(...)
584 #define OB_DEF_VAR(...)
585 #include "s390-builtins.def"
589 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
591 #undef B_DEF
592 #undef OB_DEF
593 #undef OB_DEF_VAR
594 #define B_DEF(...)
595 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
596 #define OB_DEF_VAR(...)
597 #include "s390-builtins.def"
601 const unsigned int
602 bflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
604 #undef B_DEF
605 #undef OB_DEF
606 #undef OB_DEF_VAR
607 #define B_DEF(...)
608 #define OB_DEF(...)
609 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) FLAGS,
610 #include "s390-builtins.def"
614 const unsigned int
615 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
617 #undef B_DEF
618 #undef OB_DEF
619 #undef OB_DEF_VAR
620 #define B_DEF(...)
621 #define OB_DEF(...)
622 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) OPFLAGS,
623 #include "s390-builtins.def"
627 tree s390_builtin_types[BT_MAX];
628 tree s390_builtin_fn_types[BT_FN_MAX];
629 tree s390_builtin_decls[S390_BUILTIN_MAX +
630 S390_OVERLOADED_BUILTIN_MAX +
631 S390_OVERLOADED_BUILTIN_VAR_MAX];
633 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
634 #undef B_DEF
635 #undef OB_DEF
636 #undef OB_DEF_VAR
637 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
638 #define OB_DEF(...)
639 #define OB_DEF_VAR(...)
641 #include "s390-builtins.def"
642 CODE_FOR_nothing
645 static void
646 s390_init_builtins (void)
648 /* These definitions are being used in s390-builtins.def. */
649 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
650 NULL, NULL);
651 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
652 tree c_uint64_type_node;
654 /* The uint64_type_node from tree.c is not compatible with the C99
655 uint64_t data type. What we want is c_uint64_type_node from
656 c-common.c. But since backend code is not supposed to interface
657 with the frontend we recreate it here. */
658 if (TARGET_64BIT)
659 c_uint64_type_node = long_unsigned_type_node;
660 else
661 c_uint64_type_node = long_long_unsigned_type_node;
663 #undef DEF_TYPE
664 #define DEF_TYPE(INDEX, NODE, CONST_P) \
665 if (s390_builtin_types[INDEX] == NULL) \
666 s390_builtin_types[INDEX] = (!CONST_P) ? \
667 (NODE) : build_type_variant ((NODE), 1, 0);
669 #undef DEF_POINTER_TYPE
670 #define DEF_POINTER_TYPE(INDEX, INDEX_BASE) \
671 if (s390_builtin_types[INDEX] == NULL) \
672 s390_builtin_types[INDEX] = \
673 build_pointer_type (s390_builtin_types[INDEX_BASE]);
675 #undef DEF_DISTINCT_TYPE
676 #define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE) \
677 if (s390_builtin_types[INDEX] == NULL) \
678 s390_builtin_types[INDEX] = \
679 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
681 #undef DEF_VECTOR_TYPE
682 #define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
683 if (s390_builtin_types[INDEX] == NULL) \
684 s390_builtin_types[INDEX] = \
685 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
687 #undef DEF_OPAQUE_VECTOR_TYPE
688 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
689 if (s390_builtin_types[INDEX] == NULL) \
690 s390_builtin_types[INDEX] = \
691 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
693 #undef DEF_FN_TYPE
694 #define DEF_FN_TYPE(INDEX, args...) \
695 if (s390_builtin_fn_types[INDEX] == NULL) \
696 s390_builtin_fn_types[INDEX] = \
697 build_function_type_list (args, NULL_TREE);
698 #undef DEF_OV_TYPE
699 #define DEF_OV_TYPE(...)
700 #include "s390-builtin-types.def"
702 #undef B_DEF
703 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
704 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
705 s390_builtin_decls[S390_BUILTIN_##NAME] = \
706 add_builtin_function ("__builtin_" #NAME, \
707 s390_builtin_fn_types[FNTYPE], \
708 S390_BUILTIN_##NAME, \
709 BUILT_IN_MD, \
710 NULL, \
711 ATTRS);
712 #undef OB_DEF
713 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
714 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
715 == NULL) \
716 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
717 add_builtin_function ("__builtin_" #NAME, \
718 s390_builtin_fn_types[FNTYPE], \
719 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
720 BUILT_IN_MD, \
721 NULL, \
723 #undef OB_DEF_VAR
724 #define OB_DEF_VAR(...)
725 #include "s390-builtins.def"
729 /* Return true if ARG is appropriate as argument number ARGNUM of
730 builtin DECL. The operand flags from s390-builtins.def have to
731 be passed as OP_FLAGS. */
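/* Worked example (illustrative, assuming the O_U* enumerators are
   consecutive as in the bitwidths table below): an operand flagged
   O_U4 uses a bitwidth of 4, so only the unsigned values 0..15 are
   accepted; anything else triggers the range error below. */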
732 bool
733 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
735 if (O_UIMM_P (op_flags))
737 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
738 int bitwidth = bitwidths[op_flags - O_U1];
740 if (!tree_fits_uhwi_p (arg)
741 || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
743 error("constant argument %d for builtin %qF is out of range (0.."
744 HOST_WIDE_INT_PRINT_UNSIGNED ")",
745 argnum, decl,
746 (HOST_WIDE_INT_1U << bitwidth) - 1);
747 return false;
751 if (O_SIMM_P (op_flags))
753 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
754 int bitwidth = bitwidths[op_flags - O_S2];
756 if (!tree_fits_shwi_p (arg)
757 || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
758 || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
760 error("constant argument %d for builtin %qF is out of range ("
761 HOST_WIDE_INT_PRINT_DEC ".."
762 HOST_WIDE_INT_PRINT_DEC ")",
763 argnum, decl,
764 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
765 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
766 return false;
769 return true;
772 /* Expand an expression EXP that calls a built-in function,
773 with result going to TARGET if that's convenient
774 (and in mode MODE if that's convenient).
775 SUBTARGET may be used as the target for computing one of EXP's operands.
776 IGNORE is nonzero if the value is to be ignored. */
778 static rtx
779 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
780 machine_mode mode ATTRIBUTE_UNUSED,
781 int ignore ATTRIBUTE_UNUSED)
783 #define MAX_ARGS 6
785 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
786 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
787 enum insn_code icode;
788 rtx op[MAX_ARGS], pat;
789 int arity;
790 bool nonvoid;
791 tree arg;
792 call_expr_arg_iterator iter;
793 unsigned int all_op_flags = opflags_for_builtin (fcode);
794 machine_mode last_vec_mode = VOIDmode;
796 if (TARGET_DEBUG_ARG)
798 fprintf (stderr,
799 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
800 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
801 bflags_for_builtin (fcode));
804 if (S390_USE_TARGET_ATTRIBUTE)
806 unsigned int bflags;
808 bflags = bflags_for_builtin (fcode);
809 if ((bflags & B_HTM) && !TARGET_HTM)
811 error ("builtin %qF is not supported without -mhtm "
812 "(default with -march=zEC12 and higher).", fndecl);
813 return const0_rtx;
815 if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
817 error ("builtin %qF requires -mvx "
818 "(default with -march=z13 and higher).", fndecl);
819 return const0_rtx;
822 if ((bflags & B_VXE) && !TARGET_VXE)
824 error ("Builtin %qF requires z14 or higher.", fndecl);
825 return const0_rtx;
828 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
829 && fcode < S390_ALL_BUILTIN_MAX)
831 gcc_unreachable ();
833 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
835 icode = code_for_builtin[fcode];
836 /* Set a flag in the machine specific cfun part in order to support
837 saving/restoring of FPRs. */
838 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
839 cfun->machine->tbegin_p = true;
841 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
843 error ("unresolved overloaded builtin");
844 return const0_rtx;
846 else
847 internal_error ("bad builtin fcode");
849 if (icode == 0)
850 internal_error ("bad builtin icode");
852 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
854 if (nonvoid)
856 machine_mode tmode = insn_data[icode].operand[0].mode;
857 if (!target
858 || GET_MODE (target) != tmode
859 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
860 target = gen_reg_rtx (tmode);
862 /* There are builtins (e.g. vec_promote) with no vector
863 arguments but an element selector. So we have to also look
864 at the vector return type when emitting the modulo
865 operation. */
866 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
867 last_vec_mode = insn_data[icode].operand[0].mode;
870 arity = 0;
871 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
873 rtx tmp_rtx;
874 const struct insn_operand_data *insn_op;
875 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
877 all_op_flags = all_op_flags >> O_SHIFT;
879 if (arg == error_mark_node)
880 return NULL_RTX;
881 if (arity >= MAX_ARGS)
882 return NULL_RTX;
884 if (O_IMM_P (op_flags)
885 && TREE_CODE (arg) != INTEGER_CST)
887 error ("constant value required for builtin %qF argument %d",
888 fndecl, arity + 1);
889 return const0_rtx;
892 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
893 return const0_rtx;
895 insn_op = &insn_data[icode].operand[arity + nonvoid];
896 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
898 /* expand_expr truncates constants to the target mode only if it
899 is "convenient". However, our checks below rely on this
900 being done. */
901 if (CONST_INT_P (op[arity])
902 && SCALAR_INT_MODE_P (insn_op->mode)
903 && GET_MODE (op[arity]) != insn_op->mode)
904 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
905 insn_op->mode));
907 /* Wrap the expanded RTX for pointer types into a MEM expr with
908 the proper mode. This allows us to use e.g. (match_operand
909 "memory_operand"..) in the insn patterns instead of (mem
910 (match_operand "address_operand")). This is helpful for
911 patterns not just accepting MEMs. */
912 if (POINTER_TYPE_P (TREE_TYPE (arg))
913 && insn_op->predicate != address_operand)
914 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
916 /* Expand the modulo operation required on element selectors. */
917 if (op_flags == O_ELEM)
919 gcc_assert (last_vec_mode != VOIDmode);
920 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
921 op[arity],
922 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
923 NULL_RTX, 1, OPTAB_DIRECT);
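/* For instance, if last_vec_mode is V4SImode the selector is ANDed
   with GET_MODE_NUNITS (V4SImode) - 1 == 3, i.e. element numbers
   wrap around modulo 4 (illustrative note). */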
926 /* Record the vector mode used for an element selector. This assumes:
927 1. There is no builtin with two different vector modes and an element selector
928 2. The element selector comes after the vector type it is referring to.
929 This is currently true for all the builtins, but FIXME: we
930 should check for that explicitly. */
931 if (VECTOR_MODE_P (insn_op->mode))
932 last_vec_mode = insn_op->mode;
934 if (insn_op->predicate (op[arity], insn_op->mode))
936 arity++;
937 continue;
940 if (MEM_P (op[arity])
941 && insn_op->predicate == memory_operand
942 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
943 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
945 op[arity] = replace_equiv_address (op[arity],
946 copy_to_mode_reg (Pmode,
947 XEXP (op[arity], 0)));
949 /* Some of the builtins require different modes/types than the
950 pattern in order to implement a specific API. Instead of
951 adding many expanders which do the mode change we do it here.
952 E.g. s390_vec_add_u128, which is required to have vector unsigned
953 char arguments, is mapped to addti3. */
954 else if (insn_op->mode != VOIDmode
955 && GET_MODE (op[arity]) != VOIDmode
956 && GET_MODE (op[arity]) != insn_op->mode
957 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
958 GET_MODE (op[arity]), 0))
959 != NULL_RTX))
961 op[arity] = tmp_rtx;
963 else if (GET_MODE (op[arity]) == insn_op->mode
964 || GET_MODE (op[arity]) == VOIDmode
965 || (insn_op->predicate == address_operand
966 && GET_MODE (op[arity]) == Pmode))
968 /* An address_operand usually has VOIDmode in the expander
969 so we cannot use this. */
970 machine_mode target_mode =
971 (insn_op->predicate == address_operand
972 ? (machine_mode) Pmode : insn_op->mode);
973 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
976 if (!insn_op->predicate (op[arity], insn_op->mode))
978 error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
979 return const0_rtx;
981 arity++;
984 switch (arity)
986 case 0:
987 pat = GEN_FCN (icode) (target);
988 break;
989 case 1:
990 if (nonvoid)
991 pat = GEN_FCN (icode) (target, op[0]);
992 else
993 pat = GEN_FCN (icode) (op[0]);
994 break;
995 case 2:
996 if (nonvoid)
997 pat = GEN_FCN (icode) (target, op[0], op[1]);
998 else
999 pat = GEN_FCN (icode) (op[0], op[1]);
1000 break;
1001 case 3:
1002 if (nonvoid)
1003 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1004 else
1005 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1006 break;
1007 case 4:
1008 if (nonvoid)
1009 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1010 else
1011 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1012 break;
1013 case 5:
1014 if (nonvoid)
1015 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1016 else
1017 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1018 break;
1019 case 6:
1020 if (nonvoid)
1021 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1022 else
1023 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1024 break;
1025 default:
1026 gcc_unreachable ();
1028 if (!pat)
1029 return NULL_RTX;
1030 emit_insn (pat);
1032 if (nonvoid)
1033 return target;
1034 else
1035 return const0_rtx;
1039 static const int s390_hotpatch_hw_max = 1000000;
1040 static int s390_hotpatch_hw_before_label = 0;
1041 static int s390_hotpatch_hw_after_label = 0;
1043 /* Check whether the hotpatch attribute is applied to a function and, if it has
1044 an argument, the argument is valid. */
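/* Illustrative usage (editorial addition, not from the original
   sources):
     void foo (void) __attribute__ ((hotpatch (1, 2)));
   requests 1 halfword of hotpatch space before and 2 halfwords after
   the function label; both values are checked against
   s390_hotpatch_hw_max below. */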
1046 static tree
1047 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1048 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1050 tree expr;
1051 tree expr2;
1052 int err;
1054 if (TREE_CODE (*node) != FUNCTION_DECL)
1056 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1057 name);
1058 *no_add_attrs = true;
1060 if (args != NULL && TREE_CHAIN (args) != NULL)
1062 expr = TREE_VALUE (args);
1063 expr2 = TREE_VALUE (TREE_CHAIN (args));
1065 if (args == NULL || TREE_CHAIN (args) == NULL)
1066 err = 1;
1067 else if (TREE_CODE (expr) != INTEGER_CST
1068 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1069 || wi::gtu_p (wi::to_wide (expr), s390_hotpatch_hw_max))
1070 err = 1;
1071 else if (TREE_CODE (expr2) != INTEGER_CST
1072 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1073 || wi::gtu_p (wi::to_wide (expr2), s390_hotpatch_hw_max))
1074 err = 1;
1075 else
1076 err = 0;
1077 if (err)
1079 error ("requested %qE attribute is not a comma separated pair of"
1080 " non-negative integer constants or too large (max. %d)", name,
1081 s390_hotpatch_hw_max);
1082 *no_add_attrs = true;
1085 return NULL_TREE;
1088 /* Expand the s390_vector_bool type attribute. */
1090 static tree
1091 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1092 tree args ATTRIBUTE_UNUSED,
1093 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1095 tree type = *node, result = NULL_TREE;
1096 machine_mode mode;
1098 while (POINTER_TYPE_P (type)
1099 || TREE_CODE (type) == FUNCTION_TYPE
1100 || TREE_CODE (type) == METHOD_TYPE
1101 || TREE_CODE (type) == ARRAY_TYPE)
1102 type = TREE_TYPE (type);
1104 mode = TYPE_MODE (type);
1105 switch (mode)
1107 case E_DImode: case E_V2DImode:
1108 result = s390_builtin_types[BT_BV2DI];
1109 break;
1110 case E_SImode: case E_V4SImode:
1111 result = s390_builtin_types[BT_BV4SI];
1112 break;
1113 case E_HImode: case E_V8HImode:
1114 result = s390_builtin_types[BT_BV8HI];
1115 break;
1116 case E_QImode: case E_V16QImode:
1117 result = s390_builtin_types[BT_BV16QI];
1118 break;
1119 default:
1120 break;
1123 *no_add_attrs = true; /* No need to hang on to the attribute. */
1125 if (result)
1126 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1128 return NULL_TREE;
1131 /* Check syntax of function decl attributes having a string type value. */
1133 static tree
1134 s390_handle_string_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1135 tree args ATTRIBUTE_UNUSED,
1136 int flags ATTRIBUTE_UNUSED,
1137 bool *no_add_attrs)
1139 tree cst;
1141 if (TREE_CODE (*node) != FUNCTION_DECL)
1143 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1144 name);
1145 *no_add_attrs = true;
1148 cst = TREE_VALUE (args);
1150 if (TREE_CODE (cst) != STRING_CST)
1152 warning (OPT_Wattributes,
1153 "%qE attribute requires a string constant argument",
1154 name);
1155 *no_add_attrs = true;
1158 if (is_attribute_p ("indirect_branch", name)
1159 || is_attribute_p ("indirect_branch_call", name)
1160 || is_attribute_p ("function_return", name)
1161 || is_attribute_p ("function_return_reg", name)
1162 || is_attribute_p ("function_return_mem", name))
1164 if (strcmp (TREE_STRING_POINTER (cst), "keep") != 0
1165 && strcmp (TREE_STRING_POINTER (cst), "thunk") != 0
1166 && strcmp (TREE_STRING_POINTER (cst), "thunk-extern") != 0)
1168 warning (OPT_Wattributes,
1169 "argument to %qE attribute is not "
1170 "(keep|thunk|thunk-extern)", name);
1171 *no_add_attrs = true;
1175 if (is_attribute_p ("indirect_branch_jump", name)
1176 && strcmp (TREE_STRING_POINTER (cst), "keep") != 0
1177 && strcmp (TREE_STRING_POINTER (cst), "thunk") != 0
1178 && strcmp (TREE_STRING_POINTER (cst), "thunk-inline") != 0
1179 && strcmp (TREE_STRING_POINTER (cst), "thunk-extern") != 0)
1181 warning (OPT_Wattributes,
1182 "argument to %qE attribute is not "
1183 "(keep|thunk|thunk-inline|thunk-extern)", name);
1184 *no_add_attrs = true;
1187 return NULL_TREE;
1190 static const struct attribute_spec s390_attribute_table[] = {
1191 { "hotpatch", 2, 2, true, false, false, false,
1192 s390_handle_hotpatch_attribute, NULL },
1193 { "s390_vector_bool", 0, 0, false, true, false, true,
1194 s390_handle_vectorbool_attribute, NULL },
1195 { "indirect_branch", 1, 1, true, false, false, false,
1196 s390_handle_string_attribute, NULL },
1197 { "indirect_branch_jump", 1, 1, true, false, false, false,
1198 s390_handle_string_attribute, NULL },
1199 { "indirect_branch_call", 1, 1, true, false, false, false,
1200 s390_handle_string_attribute, NULL },
1201 { "function_return", 1, 1, true, false, false, false,
1202 s390_handle_string_attribute, NULL },
1203 { "function_return_reg", 1, 1, true, false, false, false,
1204 s390_handle_string_attribute, NULL },
1205 { "function_return_mem", 1, 1, true, false, false, false,
1206 s390_handle_string_attribute, NULL },
1208 /* End element. */
1209 { NULL, 0, 0, false, false, false, false, NULL, NULL }
1212 /* Return the alignment for LABEL. We default to the -falign-labels
1213 value except for the literal pool base label. */
1215 s390_label_align (rtx_insn *label)
1217 rtx_insn *prev_insn = prev_active_insn (label);
1218 rtx set, src;
1220 if (prev_insn == NULL_RTX)
1221 goto old;
1223 set = single_set (prev_insn);
1225 if (set == NULL_RTX)
1226 goto old;
1228 src = SET_SRC (set);
1230 /* Don't align literal pool base labels. */
1231 if (GET_CODE (src) == UNSPEC
1232 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1233 return 0;
1235 old:
1236 return align_labels_log;
1239 static GTY(()) rtx got_symbol;
1241 /* Return the GOT table symbol. The symbol will be created when the
1242 function is invoked for the first time. */
1244 static rtx
1245 s390_got_symbol (void)
1247 if (!got_symbol)
1249 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
1250 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
1253 return got_symbol;
1256 static scalar_int_mode
1257 s390_libgcc_cmp_return_mode (void)
1259 return TARGET_64BIT ? DImode : SImode;
1262 static scalar_int_mode
1263 s390_libgcc_shift_count_mode (void)
1265 return TARGET_64BIT ? DImode : SImode;
1268 static scalar_int_mode
1269 s390_unwind_word_mode (void)
1271 return TARGET_64BIT ? DImode : SImode;
1274 /* Return true if the back end supports mode MODE. */
1275 static bool
1276 s390_scalar_mode_supported_p (scalar_mode mode)
1278 /* In contrast to the default implementation, reject TImode constants on 31-bit
1279 TARGET_ZARCH for ABI compliance. */
1280 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1281 return false;
1283 if (DECIMAL_FLOAT_MODE_P (mode))
1284 return default_decimal_float_supported_p ();
1286 return default_scalar_mode_supported_p (mode);
1289 /* Return true if the back end supports vector mode MODE. */
1290 static bool
1291 s390_vector_mode_supported_p (machine_mode mode)
1293 machine_mode inner;
1295 if (!VECTOR_MODE_P (mode)
1296 || !TARGET_VX
1297 || GET_MODE_SIZE (mode) > 16)
1298 return false;
1300 inner = GET_MODE_INNER (mode);
1302 switch (inner)
1304 case E_QImode:
1305 case E_HImode:
1306 case E_SImode:
1307 case E_DImode:
1308 case E_TImode:
1309 case E_SFmode:
1310 case E_DFmode:
1311 case E_TFmode:
1312 return true;
1313 default:
1314 return false;
1318 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1320 void
1321 s390_set_has_landing_pad_p (bool value)
1323 cfun->machine->has_landing_pad_p = value;
1326 /* If two condition code modes are compatible, return a condition code
1327 mode which is compatible with both. Otherwise, return
1328 VOIDmode. */
1330 static machine_mode
1331 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1333 if (m1 == m2)
1334 return m1;
1336 switch (m1)
1338 case E_CCZmode:
1339 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1340 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1341 return m2;
1342 return VOIDmode;
1344 case E_CCSmode:
1345 case E_CCUmode:
1346 case E_CCTmode:
1347 case E_CCSRmode:
1348 case E_CCURmode:
1349 case E_CCZ1mode:
1350 if (m2 == CCZmode)
1351 return m1;
1353 return VOIDmode;
1355 default:
1356 return VOIDmode;
1358 return VOIDmode;
1361 /* Return true if SET either doesn't set the CC register, or else
1362 the source and destination have matching CC modes and that
1363 CC mode is at least as constrained as REQ_MODE. */
1365 static bool
1366 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1368 machine_mode set_mode;
1370 gcc_assert (GET_CODE (set) == SET);
1372 /* These modes are supposed to be used only in CC consumer
1373 patterns. */
1374 gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
1375 && req_mode != CCVFALLmode && req_mode != CCVFANYmode);
1377 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1378 return 1;
1380 set_mode = GET_MODE (SET_DEST (set));
1381 switch (set_mode)
1383 case E_CCZ1mode:
1384 case E_CCSmode:
1385 case E_CCSRmode:
1386 case E_CCUmode:
1387 case E_CCURmode:
1388 case E_CCLmode:
1389 case E_CCL1mode:
1390 case E_CCL2mode:
1391 case E_CCL3mode:
1392 case E_CCT1mode:
1393 case E_CCT2mode:
1394 case E_CCT3mode:
1395 case E_CCVEQmode:
1396 case E_CCVIHmode:
1397 case E_CCVIHUmode:
1398 case E_CCVFHmode:
1399 case E_CCVFHEmode:
1400 if (req_mode != set_mode)
1401 return 0;
1402 break;
1404 case E_CCZmode:
1405 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1406 && req_mode != CCSRmode && req_mode != CCURmode
1407 && req_mode != CCZ1mode)
1408 return 0;
1409 break;
1411 case E_CCAPmode:
1412 case E_CCANmode:
1413 if (req_mode != CCAmode)
1414 return 0;
1415 break;
1417 default:
1418 gcc_unreachable ();
1421 return (GET_MODE (SET_SRC (set)) == set_mode);
1424 /* Return true if every SET in INSN that sets the CC register
1425 has source and destination with matching CC modes and that
1426 CC mode is at least as constrained as REQ_MODE.
1427 If REQ_MODE is VOIDmode, always return false. */
1429 bool
1430 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1432 int i;
1434 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1435 if (req_mode == VOIDmode)
1436 return false;
1438 if (GET_CODE (PATTERN (insn)) == SET)
1439 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1441 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1442 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1444 rtx set = XVECEXP (PATTERN (insn), 0, i);
1445 if (GET_CODE (set) == SET)
1446 if (!s390_match_ccmode_set (set, req_mode))
1447 return false;
1450 return true;
1453 /* If a test-under-mask instruction can be used to implement
1454 (compare (and ... OP1) OP2), return the CC mode required
1455 to do that. Otherwise, return VOIDmode.
1456 MIXED is true if the instruction can distinguish between
1457 CC1 and CC2 for mixed selected bits (TMxx); it is false
1458 if the instruction cannot (TM). */
1460 machine_mode
1461 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1463 int bit0, bit1;
1465 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1466 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1467 return VOIDmode;
1469 /* Selected bits all zero: CC0.
1470 e.g.: int a; if ((a & (16 + 128)) == 0) */
1471 if (INTVAL (op2) == 0)
1472 return CCTmode;
1474 /* Selected bits all one: CC3.
1475 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1476 if (INTVAL (op2) == INTVAL (op1))
1477 return CCT3mode;
1479 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1480 int a;
1481 if ((a & (16 + 128)) == 16) -> CCT1
1482 if ((a & (16 + 128)) == 128) -> CCT2 */
1483 if (mixed)
1485 bit1 = exact_log2 (INTVAL (op2));
1486 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1487 if (bit0 != -1 && bit1 != -1)
1488 return bit0 > bit1 ? CCT1mode : CCT2mode;
1491 return VOIDmode;
1494 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1495 OP0 and OP1 of a COMPARE, return the mode to be used for the
1496 comparison. */
1498 machine_mode
1499 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1501 switch (code)
1503 case EQ:
1504 case NE:
1505 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1506 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1507 return CCAPmode;
1508 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1509 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1510 return CCAPmode;
1511 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1512 || GET_CODE (op1) == NEG)
1513 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1514 return CCLmode;
1516 if (GET_CODE (op0) == AND)
1518 /* Check whether we can potentially do it via TM. */
1519 machine_mode ccmode;
1520 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1521 if (ccmode != VOIDmode)
1523 /* Relax CCTmode to CCZmode to allow fall-back to AND
1524 if that turns out to be beneficial. */
1525 return ccmode == CCTmode ? CCZmode : ccmode;
1529 if (register_operand (op0, HImode)
1530 && GET_CODE (op1) == CONST_INT
1531 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1532 return CCT3mode;
1533 if (register_operand (op0, QImode)
1534 && GET_CODE (op1) == CONST_INT
1535 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1536 return CCT3mode;
1538 return CCZmode;
1540 case LE:
1541 case LT:
1542 case GE:
1543 case GT:
1544 /* The only overflow condition of NEG and ABS happens when the
1545 operand is INT_MIN: its negation cannot be represented and stays
1546 negative, so we overflow from a (mathematically) positive value
1547 to a negative one. Using CCAPmode the resulting cc can be used for comparisons. */
1548 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1549 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1550 return CCAPmode;
1552 /* If constants are involved in an add instruction it is possible to use
1553 the resulting cc for comparisons with zero. Knowing the sign of the
1554 constant the overflow behavior gets predictable. e.g.:
1555 int a, b; if ((b = a + c) > 0)
1556 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1557 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1558 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1559 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1560 /* Avoid INT32_MIN on 32 bit. */
1561 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1563 if (INTVAL (XEXP((op0), 1)) < 0)
1564 return CCANmode;
1565 else
1566 return CCAPmode;
1568 /* Fall through. */
1569 case UNORDERED:
1570 case ORDERED:
1571 case UNEQ:
1572 case UNLE:
1573 case UNLT:
1574 case UNGE:
1575 case UNGT:
1576 case LTGT:
1577 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1578 && GET_CODE (op1) != CONST_INT)
1579 return CCSRmode;
1580 return CCSmode;
1582 case LTU:
1583 case GEU:
1584 if (GET_CODE (op0) == PLUS
1585 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1586 return CCL1mode;
1588 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1589 && GET_CODE (op1) != CONST_INT)
1590 return CCURmode;
1591 return CCUmode;
1593 case LEU:
1594 case GTU:
1595 if (GET_CODE (op0) == MINUS
1596 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1597 return CCL2mode;
1599 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1600 && GET_CODE (op1) != CONST_INT)
1601 return CCURmode;
1602 return CCUmode;
1604 default:
1605 gcc_unreachable ();
1609 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1610 that we can implement more efficiently. */
1612 static void
1613 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1614 bool op0_preserve_value)
1616 if (op0_preserve_value)
1617 return;
1619 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1620 if ((*code == EQ || *code == NE)
1621 && *op1 == const0_rtx
1622 && GET_CODE (*op0) == ZERO_EXTRACT
1623 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1624 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1625 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1627 rtx inner = XEXP (*op0, 0);
1628 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1629 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1630 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1632 if (len > 0 && len < modesize
1633 && pos >= 0 && pos + len <= modesize
1634 && modesize <= HOST_BITS_PER_WIDE_INT)
1636 unsigned HOST_WIDE_INT block;
1637 block = (HOST_WIDE_INT_1U << len) - 1;
1638 block <<= modesize - pos - len;
1640 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1641 gen_int_mode (block, GET_MODE (inner)));
1645 /* Narrow AND of memory against immediate to enable TM. */
1646 if ((*code == EQ || *code == NE)
1647 && *op1 == const0_rtx
1648 && GET_CODE (*op0) == AND
1649 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1650 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1652 rtx inner = XEXP (*op0, 0);
1653 rtx mask = XEXP (*op0, 1);
1655 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1656 if (GET_CODE (inner) == SUBREG
1657 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1658 && (GET_MODE_SIZE (GET_MODE (inner))
1659 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1660 && ((INTVAL (mask)
1661 & GET_MODE_MASK (GET_MODE (inner))
1662 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1663 == 0))
1664 inner = SUBREG_REG (inner);
1666 /* Do not change volatile MEMs. */
1667 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1669 int part = s390_single_part (XEXP (*op0, 1),
1670 GET_MODE (inner), QImode, 0);
1671 if (part >= 0)
1673 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1674 inner = adjust_address_nv (inner, QImode, part);
1675 *op0 = gen_rtx_AND (QImode, inner, mask);
1680 /* Narrow comparisons against 0xffff to HImode if possible. */
1681 if ((*code == EQ || *code == NE)
1682 && GET_CODE (*op1) == CONST_INT
1683 && INTVAL (*op1) == 0xffff
1684 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1685 && (nonzero_bits (*op0, GET_MODE (*op0))
1686 & ~HOST_WIDE_INT_UC (0xffff)) == 0)
1688 *op0 = gen_lowpart (HImode, *op0);
1689 *op1 = constm1_rtx;
1692 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1693 if (GET_CODE (*op0) == UNSPEC
1694 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1695 && XVECLEN (*op0, 0) == 1
1696 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1697 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1698 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1699 && *op1 == const0_rtx)
1701 enum rtx_code new_code = UNKNOWN;
1702 switch (*code)
1704 case EQ: new_code = EQ; break;
1705 case NE: new_code = NE; break;
1706 case LT: new_code = GTU; break;
1707 case GT: new_code = LTU; break;
1708 case LE: new_code = GEU; break;
1709 case GE: new_code = LEU; break;
1710 default: break;
1713 if (new_code != UNKNOWN)
1715 *op0 = XVECEXP (*op0, 0, 0);
1716 *code = new_code;
1720 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1721 if (GET_CODE (*op0) == UNSPEC
1722 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1723 && XVECLEN (*op0, 0) == 1
1724 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1725 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1726 && CONST_INT_P (*op1))
1728 enum rtx_code new_code = UNKNOWN;
1729 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1731 case E_CCZmode:
1732 case E_CCRAWmode:
1733 switch (*code)
1735 case EQ: new_code = EQ; break;
1736 case NE: new_code = NE; break;
1737 default: break;
1739 break;
1740 default: break;
1743 if (new_code != UNKNOWN)
1745 /* For CCRAWmode put the required cc mask into the second
1746 operand. */
1747 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1748 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1749 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1750 *op0 = XVECEXP (*op0, 0, 0);
1751 *code = new_code;
1755 /* Simplify cascaded EQ, NE with const0_rtx. */
1756 if ((*code == NE || *code == EQ)
1757 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1758 && GET_MODE (*op0) == SImode
1759 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1760 && REG_P (XEXP (*op0, 0))
1761 && XEXP (*op0, 1) == const0_rtx
1762 && *op1 == const0_rtx)
1764 if ((*code == EQ && GET_CODE (*op0) == NE)
1765 || (*code == NE && GET_CODE (*op0) == EQ))
1766 *code = EQ;
1767 else
1768 *code = NE;
1769 *op0 = XEXP (*op0, 0);
1772 /* Prefer register over memory as first operand. */
1773 if (MEM_P (*op0) && REG_P (*op1))
1775 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1776 *code = (int)swap_condition ((enum rtx_code)*code);
1779 /* A comparison result is compared against zero. Replace it with
1780 the (perhaps inverted) original comparison.
1781 This probably should be done by simplify_relational_operation. */
1782 if ((*code == EQ || *code == NE)
1783 && *op1 == const0_rtx
1784 && COMPARISON_P (*op0)
1785 && CC_REG_P (XEXP (*op0, 0)))
1787 enum rtx_code new_code;
1789 if (*code == EQ)
1790 new_code = reversed_comparison_code_parts (GET_CODE (*op0),
1791 XEXP (*op0, 0),
1792 XEXP (*op1, 0), NULL);
1793 else
1794 new_code = GET_CODE (*op0);
1796 if (new_code != UNKNOWN)
1798 *code = new_code;
1799 *op1 = XEXP (*op0, 1);
1800 *op0 = XEXP (*op0, 0);
1806 /* Emit a compare instruction suitable to implement the comparison
1807 OP0 CODE OP1. Return the correct condition RTL to be placed in
1808 the IF_THEN_ELSE of the conditional branch testing the result. */
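/* A typical use is sketched here (illustrative only):
     rtx cond = s390_emit_compare (EQ, op0, op1);
     s390_emit_jump (target_label, cond);
   where s390_emit_jump, defined further down, places COND into the
   IF_THEN_ELSE of the branch. */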
1811 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1813 machine_mode mode = s390_select_ccmode (code, op0, op1);
1814 rtx cc;
1816 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1818 /* Do not output a redundant compare instruction if a
1819 compare_and_swap pattern already computed the result and the
1820 machine modes are compatible. */
1821 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1822 == GET_MODE (op0));
1823 cc = op0;
1825 else
1827 cc = gen_rtx_REG (mode, CC_REGNUM);
1828 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1831 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1834 /* Emit a compare-and-swap instruction in the mode of MEM setting MEM to NEW_RTX if OLD
1835 matches CMP.
1836 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1837 conditional branch testing the result. */
1839 static rtx
1840 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1841 rtx cmp, rtx new_rtx, machine_mode ccmode)
1843 rtx cc;
1845 cc = gen_rtx_REG (ccmode, CC_REGNUM);
1846 switch (GET_MODE (mem))
1848 case E_SImode:
1849 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp,
1850 new_rtx, cc));
1851 break;
1852 case E_DImode:
1853 emit_insn (gen_atomic_compare_and_swapdi_internal (old, mem, cmp,
1854 new_rtx, cc));
1855 break;
1856 case E_TImode:
1857 emit_insn (gen_atomic_compare_and_swapti_internal (old, mem, cmp,
1858 new_rtx, cc));
1859 break;
1860 case E_QImode:
1861 case E_HImode:
1862 default:
1863 gcc_unreachable ();
1865 return s390_emit_compare (code, cc, const0_rtx);
1868 /* Emit a jump instruction to TARGET and return it. If COND is
1869 NULL_RTX, emit an unconditional jump, else a conditional jump under
1870 condition COND. */
1872 rtx_insn *
1873 s390_emit_jump (rtx target, rtx cond)
1875 rtx insn;
1877 target = gen_rtx_LABEL_REF (VOIDmode, target);
1878 if (cond)
1879 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1881 insn = gen_rtx_SET (pc_rtx, target);
1882 return emit_jump_insn (insn);
1885 /* Return branch condition mask to implement a branch
1886 specified by CODE. Return -1 for invalid comparisons. */
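/* For example, an EQ test in CCZmode yields CC0, i.e. the mask
   1 << 3 == 8, while NE yields CC1 | CC2 | CC3 == 7 (derived from
   the table below; illustrative note). */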
1889 s390_branch_condition_mask (rtx code)
1891 const int CC0 = 1 << 3;
1892 const int CC1 = 1 << 2;
1893 const int CC2 = 1 << 1;
1894 const int CC3 = 1 << 0;
1896 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1897 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1898 gcc_assert (XEXP (code, 1) == const0_rtx
1899 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1900 && CONST_INT_P (XEXP (code, 1))));
1903 switch (GET_MODE (XEXP (code, 0)))
1905 case E_CCZmode:
1906 case E_CCZ1mode:
1907 switch (GET_CODE (code))
1909 case EQ: return CC0;
1910 case NE: return CC1 | CC2 | CC3;
1911 default: return -1;
1913 break;
1915 case E_CCT1mode:
1916 switch (GET_CODE (code))
1918 case EQ: return CC1;
1919 case NE: return CC0 | CC2 | CC3;
1920 default: return -1;
1922 break;
1924 case E_CCT2mode:
1925 switch (GET_CODE (code))
1927 case EQ: return CC2;
1928 case NE: return CC0 | CC1 | CC3;
1929 default: return -1;
1931 break;
1933 case E_CCT3mode:
1934 switch (GET_CODE (code))
1936 case EQ: return CC3;
1937 case NE: return CC0 | CC1 | CC2;
1938 default: return -1;
1940 break;
1942 case E_CCLmode:
1943 switch (GET_CODE (code))
1945 case EQ: return CC0 | CC2;
1946 case NE: return CC1 | CC3;
1947 default: return -1;
1949 break;
1951 case E_CCL1mode:
1952 switch (GET_CODE (code))
1954 case LTU: return CC2 | CC3; /* carry */
1955 case GEU: return CC0 | CC1; /* no carry */
1956 default: return -1;
1958 break;
1960 case E_CCL2mode:
1961 switch (GET_CODE (code))
1963 case GTU: return CC0 | CC1; /* borrow */
1964 case LEU: return CC2 | CC3; /* no borrow */
1965 default: return -1;
1967 break;
1969 case E_CCL3mode:
1970 switch (GET_CODE (code))
1972 case EQ: return CC0 | CC2;
1973 case NE: return CC1 | CC3;
1974 case LTU: return CC1;
1975 case GTU: return CC3;
1976 case LEU: return CC1 | CC2;
1977 case GEU: return CC2 | CC3;
1978 default: return -1;
1981 case E_CCUmode:
1982 switch (GET_CODE (code))
1984 case EQ: return CC0;
1985 case NE: return CC1 | CC2 | CC3;
1986 case LTU: return CC1;
1987 case GTU: return CC2;
1988 case LEU: return CC0 | CC1;
1989 case GEU: return CC0 | CC2;
1990 default: return -1;
1992 break;
1994 case E_CCURmode:
1995 switch (GET_CODE (code))
1997 case EQ: return CC0;
1998 case NE: return CC2 | CC1 | CC3;
1999 case LTU: return CC2;
2000 case GTU: return CC1;
2001 case LEU: return CC0 | CC2;
2002 case GEU: return CC0 | CC1;
2003 default: return -1;
2005 break;
2007 case E_CCAPmode:
2008 switch (GET_CODE (code))
2010 case EQ: return CC0;
2011 case NE: return CC1 | CC2 | CC3;
2012 case LT: return CC1 | CC3;
2013 case GT: return CC2;
2014 case LE: return CC0 | CC1 | CC3;
2015 case GE: return CC0 | CC2;
2016 default: return -1;
2018 break;
2020 case E_CCANmode:
2021 switch (GET_CODE (code))
2023 case EQ: return CC0;
2024 case NE: return CC1 | CC2 | CC3;
2025 case LT: return CC1;
2026 case GT: return CC2 | CC3;
2027 case LE: return CC0 | CC1;
2028 case GE: return CC0 | CC2 | CC3;
2029 default: return -1;
2031 break;
2033 case E_CCSmode:
2034 switch (GET_CODE (code))
2036 case EQ: return CC0;
2037 case NE: return CC1 | CC2 | CC3;
2038 case LT: return CC1;
2039 case GT: return CC2;
2040 case LE: return CC0 | CC1;
2041 case GE: return CC0 | CC2;
2042 case UNORDERED: return CC3;
2043 case ORDERED: return CC0 | CC1 | CC2;
2044 case UNEQ: return CC0 | CC3;
2045 case UNLT: return CC1 | CC3;
2046 case UNGT: return CC2 | CC3;
2047 case UNLE: return CC0 | CC1 | CC3;
2048 case UNGE: return CC0 | CC2 | CC3;
2049 case LTGT: return CC1 | CC2;
2050 default: return -1;
2052 break;
2054 case E_CCSRmode:
2055 switch (GET_CODE (code))
2057 case EQ: return CC0;
2058 case NE: return CC2 | CC1 | CC3;
2059 case LT: return CC2;
2060 case GT: return CC1;
2061 case LE: return CC0 | CC2;
2062 case GE: return CC0 | CC1;
2063 case UNORDERED: return CC3;
2064 case ORDERED: return CC0 | CC2 | CC1;
2065 case UNEQ: return CC0 | CC3;
2066 case UNLT: return CC2 | CC3;
2067 case UNGT: return CC1 | CC3;
2068 case UNLE: return CC0 | CC2 | CC3;
2069 case UNGE: return CC0 | CC1 | CC3;
2070 case LTGT: return CC2 | CC1;
2071 default: return -1;
2073 break;
2075 /* Vector comparison modes. */
2076 /* CC2 will never be set. It however is part of the negated
2077 masks. */
2078 case E_CCVIALLmode:
2079 switch (GET_CODE (code))
2081 case EQ:
2082 case GTU:
2083 case GT:
2084 case GE: return CC0;
2085 /* The inverted modes are in fact *any* modes. */
2086 case NE:
2087 case LEU:
2088 case LE:
2089 case LT: return CC3 | CC1 | CC2;
2090 default: return -1;
2093 case E_CCVIANYmode:
2094 switch (GET_CODE (code))
2096 case EQ:
2097 case GTU:
2098 case GT:
2099 case GE: return CC0 | CC1;
2100 /* The inverted modes are in fact *all* modes. */
2101 case NE:
2102 case LEU:
2103 case LE:
2104 case LT: return CC3 | CC2;
2105 default: return -1;
2107 case E_CCVFALLmode:
2108 switch (GET_CODE (code))
2110 case EQ:
2111 case GT:
2112 case GE: return CC0;
2113 /* The inverted modes are in fact *any* modes. */
2114 case NE:
2115 case UNLE:
2116 case UNLT: return CC3 | CC1 | CC2;
2117 default: return -1;
2120 case E_CCVFANYmode:
2121 switch (GET_CODE (code))
2123 case EQ:
2124 case GT:
2125 case GE: return CC0 | CC1;
2126 /* The inverted modes are in fact *all* modes. */
2127 case NE:
2128 case UNLE:
2129 case UNLT: return CC3 | CC2;
2130 default: return -1;
2133 case E_CCRAWmode:
2134 switch (GET_CODE (code))
2136 case EQ:
2137 return INTVAL (XEXP (code, 1));
2138 case NE:
2139 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2140 default:
2141 gcc_unreachable ();
2144 default:
2145 return -1;
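/* Worked example (illustrative): for a comparison rtx
   (eq (reg:CCZ CC_REGNUM) (const_int 0)) the E_CCZmode case above
   returns CC0 == 0b1000, i.e. the branch is taken only for condition
   code 0, while NE returns the complementary mask
   CC1 | CC2 | CC3 == 0b0111.  */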
2150 /* Return branch condition mask to implement a compare and branch
2151 specified by CODE. Return -1 for invalid comparisons. */
2154 s390_compare_and_branch_condition_mask (rtx code)
2156 const int CC0 = 1 << 3;
2157 const int CC1 = 1 << 2;
2158 const int CC2 = 1 << 1;
2160 switch (GET_CODE (code))
2162 case EQ:
2163 return CC0;
2164 case NE:
2165 return CC1 | CC2;
2166 case LT:
2167 case LTU:
2168 return CC1;
2169 case GT:
2170 case GTU:
2171 return CC2;
2172 case LE:
2173 case LEU:
2174 return CC0 | CC1;
2175 case GE:
2176 case GEU:
2177 return CC0 | CC2;
2178 default:
2179 gcc_unreachable ();
2181 return -1;
2184 /* If INV is false, return assembler mnemonic string to implement
2185 a branch specified by CODE. If INV is true, return mnemonic
2186 for the corresponding inverted branch. */
2188 static const char *
2189 s390_branch_condition_mnemonic (rtx code, int inv)
2191 int mask;
2193 static const char *const mnemonic[16] =
2195 NULL, "o", "h", "nle",
2196 "l", "nhe", "lh", "ne",
2197 "e", "nlh", "he", "nl",
2198 "le", "nh", "no", NULL
2201 if (GET_CODE (XEXP (code, 0)) == REG
2202 && REGNO (XEXP (code, 0)) == CC_REGNUM
2203 && (XEXP (code, 1) == const0_rtx
2204 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2205 && CONST_INT_P (XEXP (code, 1)))))
2206 mask = s390_branch_condition_mask (code);
2207 else
2208 mask = s390_compare_and_branch_condition_mask (code);
2210 gcc_assert (mask >= 0);
2212 if (inv)
2213 mask ^= 15;
2215 gcc_assert (mask >= 1 && mask <= 14);
2217 return mnemonic[mask];
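/* Worked example (illustrative): continuing the CCZmode example above,
   EQ gives mask 8 and thus the mnemonic "e"; with INV != 0 the mask is
   flipped to 7, selecting "ne".  Masks 0 and 15 ("never"/"always") are
   rejected by the preceding assertion.  */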
2220 /* Return the part of op which has a value different from def.
2221 The size of the part is determined by mode.
2222 Use this function only if you already know that op really
2223 contains such a part. */
2225 unsigned HOST_WIDE_INT
2226 s390_extract_part (rtx op, machine_mode mode, int def)
2228 unsigned HOST_WIDE_INT value = 0;
2229 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2230 int part_bits = GET_MODE_BITSIZE (mode);
2231 unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
2232 int i;
2234 for (i = 0; i < max_parts; i++)
2236 if (i == 0)
2237 value = UINTVAL (op);
2238 else
2239 value >>= part_bits;
2241 if ((value & part_mask) != (def & part_mask))
2242 return value & part_mask;
2245 gcc_unreachable ();
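/* Worked example (illustrative): for op == (const_int 0x12340000),
   mode == HImode and def == 0 the loop above skips the low HImode part
   (0x0000 == def) and returns the next part, 0x1234.  */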
2248 /* If OP is an integer constant of mode MODE with exactly one
2249 part of mode PART_MODE unequal to DEF, return the number of that
2250 part. Otherwise, return -1. */
2253 s390_single_part (rtx op,
2254 machine_mode mode,
2255 machine_mode part_mode,
2256 int def)
2258 unsigned HOST_WIDE_INT value = 0;
2259 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2260 unsigned HOST_WIDE_INT part_mask
2261 = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
2262 int i, part = -1;
2264 if (GET_CODE (op) != CONST_INT)
2265 return -1;
2267 for (i = 0; i < n_parts; i++)
2269 if (i == 0)
2270 value = UINTVAL (op);
2271 else
2272 value >>= GET_MODE_BITSIZE (part_mode);
2274 if ((value & part_mask) != (def & part_mask))
2276 if (part != -1)
2277 return -1;
2278 else
2279 part = i;
2282 return part == -1 ? -1 : n_parts - 1 - part;
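/* Worked example (illustrative): for op == (const_int 0x0000dead00000000),
   mode == DImode, part_mode == SImode and def == 0 only the upper SImode
   word differs from def, so the function returns 0 (parts are numbered
   starting from the most significant part).  */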
2285 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2286 bits and no other bits are set in (the lower SIZE bits of) IN.
2288 PSTART and PEND can be used to obtain the start and end
2289 position (inclusive) of the bitfield relative to 64
2290 bits. *PSTART / *PEND gives the position of the first/last bit
2291 of the bitfield counting from the highest order bit starting
2292 with zero. */
2294 bool
2295 s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2296 int *pstart, int *pend)
2298 int start;
2299 int end = -1;
2300 int lowbit = HOST_BITS_PER_WIDE_INT - 1;
2301 int highbit = HOST_BITS_PER_WIDE_INT - size;
2302 unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;
2304 gcc_assert (!!pstart == !!pend);
2305 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2306 if (end == -1)
2308 /* Look for the rightmost bit of a contiguous range of ones. */
2309 if (bitmask & in)
2310 /* Found it. */
2311 end = start;
2313 else
2315 /* Look for the first zero bit after the range of ones. */
2316 if (! (bitmask & in))
2317 /* Found it. */
2318 break;
2320 /* We're one past the last one-bit. */
2321 start++;
2323 if (end == -1)
2324 /* No one bits found. */
2325 return false;
2327 if (start > highbit)
2329 unsigned HOST_WIDE_INT mask;
2331 /* Calculate a mask for all bits beyond the contiguous bits. */
2332 mask = ((~HOST_WIDE_INT_0U >> highbit)
2333 & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));
2334 if (mask & in)
2335 /* There are more bits set beyond the first range of one bits. */
2336 return false;
2339 if (pstart)
2341 *pstart = start;
2342 *pend = end;
2345 return true;
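/* Worked example (illustrative): for IN == 0x0000000000fff000 and
   SIZE == 64 the set bits form a single field; counting from the
   highest order bit the function stores *PSTART == 40, *PEND == 51 and
   returns true.  IN == 0x0000000000f0f000 has two separate runs of ones
   and is rejected.  */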
2348 /* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2349 if ~IN contains a contiguous bitfield. In that case, *END is <
2350 *START.
2352 If WRAP_P is true, a bitmask that wraps around is also tested.
2353 When a wraparound occurs *START is greater than *END (in
2354 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2355 part of the range. If WRAP_P is false, no wraparound is
2356 tested. */
2358 bool
2359 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2360 int size, int *start, int *end)
2362 int bs = HOST_BITS_PER_WIDE_INT;
2363 bool b;
2365 gcc_assert (!!start == !!end);
2366 if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
2367 /* This cannot be expressed as a contiguous bitmask. Exit early because
2368 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2369 a valid bitmask. */
2370 return false;
2371 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2372 if (b)
2373 return true;
2374 if (! wrap_p)
2375 return false;
2376 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2377 if (b && start)
2379 int s = *start;
2380 int e = *end;
2382 gcc_assert (s >= 1);
2383 *start = ((e + 1) & (bs - 1));
2384 *end = ((s - 1 + bs) & (bs - 1));
2387 return b;
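/* Worked example (illustrative): with WRAP_P == true and SIZE == 64 the
   value 0xff000000000000ff is accepted as a wrapping mask: its
   complement 0x00ffffffffffff00 is contiguous, and after the conversion
   above *START == 56 and *END == 7, the condition *START > *END
   signalling the wraparound.  */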
2390 /* Return true if OP contains the same contiguous bitfield in *all*
2391 its elements. START and END can be used to obtain the start and
2392 end position of the bitfield.
2394 START/END give the position of the first/last bit of the bitfield
2395 counting from the lowest order bit starting with zero. In order to
2396 use these values for S/390 instructions this has to be converted to
2397 "bits big endian" style. */
2399 bool
2400 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2402 unsigned HOST_WIDE_INT mask;
2403 int size;
2404 rtx elt;
2405 bool b;
2407 gcc_assert (!!start == !!end);
2408 if (!const_vec_duplicate_p (op, &elt)
2409 || !CONST_INT_P (elt))
2410 return false;
2412 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2414 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2415 if (size > 64)
2416 return false;
2418 mask = UINTVAL (elt);
2420 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2421 if (b)
2423 if (start)
2425 *start -= (HOST_BITS_PER_WIDE_INT - size);
2426 *end -= (HOST_BITS_PER_WIDE_INT - size);
2428 return true;
2430 else
2431 return false;
2434 /* Return true if C consists only of byte chunks being either 0 or
2435 0xff. If MASK is !=NULL a byte mask is generated which is
2436 appropriate for the vector generate byte mask instruction. */
2438 bool
2439 s390_bytemask_vector_p (rtx op, unsigned *mask)
2441 int i;
2442 unsigned tmp_mask = 0;
2443 int nunit, unit_size;
2445 if (!VECTOR_MODE_P (GET_MODE (op))
2446 || GET_CODE (op) != CONST_VECTOR
2447 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2448 return false;
2450 nunit = GET_MODE_NUNITS (GET_MODE (op));
2451 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2453 for (i = 0; i < nunit; i++)
2455 unsigned HOST_WIDE_INT c;
2456 int j;
2458 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2459 return false;
2461 c = UINTVAL (XVECEXP (op, 0, i));
2462 for (j = 0; j < unit_size; j++)
2464 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2465 return false;
2466 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2467 c = c >> BITS_PER_UNIT;
2471 if (mask != NULL)
2472 *mask = tmp_mask;
2474 return true;
2477 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2478 equivalent to a shift followed by the AND. In particular, CONTIG
2479 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2480 for ROTL indicate a rotate to the right. */
2482 bool
2483 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2485 int start, end;
2486 bool ok;
2488 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2489 gcc_assert (ok);
2491 if (rotl >= 0)
2492 return (64 - end >= rotl);
2493 else
2495 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2496 DImode. */
2497 rotl = -rotl + (64 - bitsize);
2498 return (start >= rotl);
2502 /* Check whether we can (and want to) split a double-word
2503 move in mode MODE from SRC to DST into two single-word
2504 moves, moving the subword FIRST_SUBWORD first. */
2506 bool
2507 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2509 /* Floating point and vector registers cannot be split. */
2510 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2511 return false;
2513 /* Non-offsettable memory references cannot be split. */
2514 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2515 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2516 return false;
2518 /* Moving the first subword must not clobber a register
2519 needed to move the second subword. */
2520 if (register_operand (dst, mode))
2522 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2523 if (reg_overlap_mentioned_p (subreg, src))
2524 return false;
2527 return true;
2530 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2531 and [MEM2, MEM2 + SIZE] do overlap and false
2532 otherwise. */
2534 bool
2535 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2537 rtx addr1, addr2, addr_delta;
2538 HOST_WIDE_INT delta;
2540 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2541 return true;
2543 if (size == 0)
2544 return false;
2546 addr1 = XEXP (mem1, 0);
2547 addr2 = XEXP (mem2, 0);
2549 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2551 /* This overlapping check is used by peepholes merging memory block operations.
2552 Overlapping operations would otherwise be recognized by the S/390 hardware
2553 and would fall back to a slower implementation. Allowing overlapping
2554 operations would lead to slow code but not to wrong code. Therefore we are
2555 somewhat optimistic if we cannot prove that the memory blocks are
2556 overlapping.
2557 That's why we return false here although this may accept operations on
2558 overlapping memory areas. */
2559 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2560 return false;
2562 delta = INTVAL (addr_delta);
2564 if (delta == 0
2565 || (delta > 0 && delta < size)
2566 || (delta < 0 && -delta < size))
2567 return true;
2569 return false;
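/* Worked example (illustrative): if the address of MEM2 simplifies to
   the address of MEM1 plus 8 and SIZE == 16, the delta lies within the
   block size and an overlap is reported; with SIZE == 8 the blocks are
   disjoint and the function returns false.  */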
2572 /* Check whether the address of memory reference MEM2 equals exactly
2573 the address of memory reference MEM1 plus DELTA. Return true if
2574 we can prove this to be the case, false otherwise. */
2576 bool
2577 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2579 rtx addr1, addr2, addr_delta;
2581 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2582 return false;
2584 addr1 = XEXP (mem1, 0);
2585 addr2 = XEXP (mem2, 0);
2587 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2588 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2589 return false;
2591 return true;
2594 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2596 void
2597 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2598 rtx *operands)
2600 machine_mode wmode = mode;
2601 rtx dst = operands[0];
2602 rtx src1 = operands[1];
2603 rtx src2 = operands[2];
2604 rtx op, clob, tem;
2606 /* If we cannot handle the operation directly, use a temp register. */
2607 if (!s390_logical_operator_ok_p (operands))
2608 dst = gen_reg_rtx (mode);
2610 /* QImode and HImode patterns make sense only if we have a destination
2611 in memory. Otherwise perform the operation in SImode. */
2612 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2613 wmode = SImode;
2615 /* Widen operands if required. */
2616 if (mode != wmode)
2618 if (GET_CODE (dst) == SUBREG
2619 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2620 dst = tem;
2621 else if (REG_P (dst))
2622 dst = gen_rtx_SUBREG (wmode, dst, 0);
2623 else
2624 dst = gen_reg_rtx (wmode);
2626 if (GET_CODE (src1) == SUBREG
2627 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2628 src1 = tem;
2629 else if (GET_MODE (src1) != VOIDmode)
2630 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2632 if (GET_CODE (src2) == SUBREG
2633 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2634 src2 = tem;
2635 else if (GET_MODE (src2) != VOIDmode)
2636 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2639 /* Emit the instruction. */
2640 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2641 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2642 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2644 /* Fix up the destination if needed. */
2645 if (dst != operands[0])
2646 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2649 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2651 bool
2652 s390_logical_operator_ok_p (rtx *operands)
2654 /* If the destination operand is in memory, it needs to coincide
2655 with one of the source operands. After reload, it has to be
2656 the first source operand. */
2657 if (GET_CODE (operands[0]) == MEM)
2658 return rtx_equal_p (operands[0], operands[1])
2659 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2661 return true;
2664 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2665 operand IMMOP to switch from SS to SI type instructions. */
2667 void
2668 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2670 int def = code == AND ? -1 : 0;
2671 HOST_WIDE_INT mask;
2672 int part;
2674 gcc_assert (GET_CODE (*memop) == MEM);
2675 gcc_assert (!MEM_VOLATILE_P (*memop));
2677 mask = s390_extract_part (*immop, QImode, def);
2678 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2679 gcc_assert (part >= 0);
2681 *memop = adjust_address (*memop, QImode, part);
2682 *immop = gen_int_mode (mask, QImode);
2686 /* How to allocate a 'struct machine_function'. */
2688 static struct machine_function *
2689 s390_init_machine_status (void)
2691 return ggc_cleared_alloc<machine_function> ();
2694 /* Map for smallest class containing reg regno. */
2696 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2697 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2698 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2699 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2700 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2701 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2702 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2703 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2704 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2705 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2706 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2707 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2708 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2709 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2710 VEC_REGS, VEC_REGS /* 52 */
2713 /* Return attribute type of insn. */
2715 static enum attr_type
2716 s390_safe_attr_type (rtx_insn *insn)
2718 if (recog_memoized (insn) >= 0)
2719 return get_attr_type (insn);
2720 else
2721 return TYPE_NONE;
2724 /* Return true if DISP is a valid short displacement. */
2726 static bool
2727 s390_short_displacement (rtx disp)
2729 /* No displacement is OK. */
2730 if (!disp)
2731 return true;
2733 /* Without the long displacement facility we don't need to
2734 distinguish between long and short displacements. */
2735 if (!TARGET_LONG_DISPLACEMENT)
2736 return true;
2738 /* Integer displacement in range. */
2739 if (GET_CODE (disp) == CONST_INT)
2740 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2742 /* GOT offset is not OK, the GOT can be large. */
2743 if (GET_CODE (disp) == CONST
2744 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2745 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2746 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2747 return false;
2749 /* All other symbolic constants are literal pool references,
2750 which are OK as the literal pool must be small. */
2751 if (GET_CODE (disp) == CONST)
2752 return true;
2754 return false;
2757 /* Decompose a RTL expression ADDR for a memory address into
2758 its components, returned in OUT.
2760 Returns false if ADDR is not a valid memory address, true
2761 otherwise. If OUT is NULL, don't return the components,
2762 but check for validity only.
2764 Note: Only addresses in canonical form are recognized.
2765 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2766 canonical form so that they will be recognized. */
2768 static int
2769 s390_decompose_address (rtx addr, struct s390_address *out)
2771 HOST_WIDE_INT offset = 0;
2772 rtx base = NULL_RTX;
2773 rtx indx = NULL_RTX;
2774 rtx disp = NULL_RTX;
2775 rtx orig_disp;
2776 bool pointer = false;
2777 bool base_ptr = false;
2778 bool indx_ptr = false;
2779 bool literal_pool = false;
2781 /* We may need to substitute the literal pool base register into the address
2782 below. However, at this point we do not know which register is going to
2783 be used as base, so we substitute the arg pointer register. This is going
2784 to be treated as holding a pointer below -- it shouldn't be used for any
2785 other purpose. */
2786 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2788 /* Decompose address into base + index + displacement. */
2790 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2791 base = addr;
2793 else if (GET_CODE (addr) == PLUS)
2795 rtx op0 = XEXP (addr, 0);
2796 rtx op1 = XEXP (addr, 1);
2797 enum rtx_code code0 = GET_CODE (op0);
2798 enum rtx_code code1 = GET_CODE (op1);
2800 if (code0 == REG || code0 == UNSPEC)
2802 if (code1 == REG || code1 == UNSPEC)
2804 indx = op0; /* index + base */
2805 base = op1;
2808 else
2810 base = op0; /* base + displacement */
2811 disp = op1;
2815 else if (code0 == PLUS)
2817 indx = XEXP (op0, 0); /* index + base + disp */
2818 base = XEXP (op0, 1);
2819 disp = op1;
2822 else
2824 return false;
2828 else
2829 disp = addr; /* displacement */
2831 /* Extract integer part of displacement. */
2832 orig_disp = disp;
2833 if (disp)
2835 if (GET_CODE (disp) == CONST_INT)
2837 offset = INTVAL (disp);
2838 disp = NULL_RTX;
2840 else if (GET_CODE (disp) == CONST
2841 && GET_CODE (XEXP (disp, 0)) == PLUS
2842 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2844 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2845 disp = XEXP (XEXP (disp, 0), 0);
2849 /* Strip off CONST here to avoid special case tests later. */
2850 if (disp && GET_CODE (disp) == CONST)
2851 disp = XEXP (disp, 0);
2853 /* We can convert literal pool addresses to
2854 displacements by basing them off the base register. */
2855 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2857 if (base || indx)
2858 return false;
2860 base = fake_pool_base, literal_pool = true;
2862 /* Mark up the displacement. */
2863 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2864 UNSPEC_LTREL_OFFSET);
2867 /* Validate base register. */
2868 if (base)
2870 if (GET_CODE (base) == UNSPEC)
2871 switch (XINT (base, 1))
2873 case UNSPEC_LTREF:
2874 if (!disp)
2875 disp = gen_rtx_UNSPEC (Pmode,
2876 gen_rtvec (1, XVECEXP (base, 0, 0)),
2877 UNSPEC_LTREL_OFFSET);
2878 else
2879 return false;
2881 base = XVECEXP (base, 0, 1);
2882 break;
2884 case UNSPEC_LTREL_BASE:
2885 if (XVECLEN (base, 0) == 1)
2886 base = fake_pool_base, literal_pool = true;
2887 else
2888 base = XVECEXP (base, 0, 1);
2889 break;
2891 default:
2892 return false;
2895 if (!REG_P (base) || GET_MODE (base) != Pmode)
2896 return false;
2898 if (REGNO (base) == STACK_POINTER_REGNUM
2899 || REGNO (base) == FRAME_POINTER_REGNUM
2900 || ((reload_completed || reload_in_progress)
2901 && frame_pointer_needed
2902 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2903 || REGNO (base) == ARG_POINTER_REGNUM
2904 || (flag_pic
2905 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2906 pointer = base_ptr = true;
2908 if ((reload_completed || reload_in_progress)
2909 && base == cfun->machine->base_reg)
2910 pointer = base_ptr = literal_pool = true;
2913 /* Validate index register. */
2914 if (indx)
2916 if (GET_CODE (indx) == UNSPEC)
2917 switch (XINT (indx, 1))
2919 case UNSPEC_LTREF:
2920 if (!disp)
2921 disp = gen_rtx_UNSPEC (Pmode,
2922 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2923 UNSPEC_LTREL_OFFSET);
2924 else
2925 return false;
2927 indx = XVECEXP (indx, 0, 1);
2928 break;
2930 case UNSPEC_LTREL_BASE:
2931 if (XVECLEN (indx, 0) == 1)
2932 indx = fake_pool_base, literal_pool = true;
2933 else
2934 indx = XVECEXP (indx, 0, 1);
2935 break;
2937 default:
2938 return false;
2941 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2942 return false;
2944 if (REGNO (indx) == STACK_POINTER_REGNUM
2945 || REGNO (indx) == FRAME_POINTER_REGNUM
2946 || ((reload_completed || reload_in_progress)
2947 && frame_pointer_needed
2948 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2949 || REGNO (indx) == ARG_POINTER_REGNUM
2950 || (flag_pic
2951 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2952 pointer = indx_ptr = true;
2954 if ((reload_completed || reload_in_progress)
2955 && indx == cfun->machine->base_reg)
2956 pointer = indx_ptr = literal_pool = true;
2959 /* Prefer to use pointer as base, not index. */
2960 if (base && indx && !base_ptr
2961 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2963 rtx tmp = base;
2964 base = indx;
2965 indx = tmp;
2968 /* Validate displacement. */
2969 if (!disp)
2971 /* If virtual registers are involved, the displacement will change later
2972 anyway as the virtual registers get eliminated. This could make a
2973 valid displacement invalid, but it is more likely to make an invalid
2974 displacement valid, because we sometimes access the register save area
2975 via negative offsets to one of those registers.
2976 Thus we don't check the displacement for validity here. If after
2977 elimination the displacement turns out to be invalid after all,
2978 this is fixed up by reload in any case. */
2979 /* LRA always keeps displacements up to date, and we need to
2980 know that the displacement is valid throughout LRA, not only
2981 at the final elimination. */
2982 if (lra_in_progress
2983 || (base != arg_pointer_rtx
2984 && indx != arg_pointer_rtx
2985 && base != return_address_pointer_rtx
2986 && indx != return_address_pointer_rtx
2987 && base != frame_pointer_rtx
2988 && indx != frame_pointer_rtx
2989 && base != virtual_stack_vars_rtx
2990 && indx != virtual_stack_vars_rtx))
2991 if (!DISP_IN_RANGE (offset))
2992 return false;
2994 else
2996 /* All the special cases are pointers. */
2997 pointer = true;
2999 /* In the small-PIC case, the linker converts @GOT
3000 and @GOTNTPOFF offsets to possible displacements. */
3001 if (GET_CODE (disp) == UNSPEC
3002 && (XINT (disp, 1) == UNSPEC_GOT
3003 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
3004 && flag_pic == 1)
3009 /* Accept pool label offsets. */
3010 else if (GET_CODE (disp) == UNSPEC
3011 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
3014 /* Accept literal pool references. */
3015 else if (GET_CODE (disp) == UNSPEC
3016 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
3018 /* In case CSE pulled a non literal pool reference out of
3019 the pool we have to reject the address. This is
3020 especially important when loading the GOT pointer on non
3021 zarch CPUs. In this case the literal pool contains an lt
3022 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3023 will most likely exceed the displacement. */
3024 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3025 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3026 return false;
3028 orig_disp = gen_rtx_CONST (Pmode, disp);
3029 if (offset)
3031 /* If we have an offset, make sure it does not
3032 exceed the size of the constant pool entry. */
3033 rtx sym = XVECEXP (disp, 0, 0);
3034 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
3035 return false;
3037 orig_disp = plus_constant (Pmode, orig_disp, offset);
3041 else
3042 return false;
3045 if (!base && !indx)
3046 pointer = true;
3048 if (out)
3050 out->base = base;
3051 out->indx = indx;
3052 out->disp = orig_disp;
3053 out->pointer = pointer;
3054 out->literal_pool = literal_pool;
3057 return true;
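/* Worked example (illustrative, register names arbitrary): an address
   like (plus (reg:DI %r2) (const_int 4000)) decomposes into
   base == %r2, indx == NULL_RTX and offset == 4000, while
   (plus (plus (reg %r1) (reg %r2)) (const_int 8)) additionally fills in
   the index register.  */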
3060 /* Decompose a RTL expression OP for an address style operand into its
3061 components, and return the base register in BASE and the offset in
3062 OFFSET. While OP looks like an address it is never supposed to be
3063 used as such.
3065 Return true if OP is a valid address operand, false if not. */
3067 bool
3068 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3069 HOST_WIDE_INT *offset)
3071 rtx off = NULL_RTX;
3073 /* We can have an integer constant, an address register,
3074 or a sum of the two. */
3075 if (CONST_SCALAR_INT_P (op))
3077 off = op;
3078 op = NULL_RTX;
3080 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3082 off = XEXP (op, 1);
3083 op = XEXP (op, 0);
3085 while (op && GET_CODE (op) == SUBREG)
3086 op = SUBREG_REG (op);
3088 if (op && GET_CODE (op) != REG)
3089 return false;
3091 if (offset)
3093 if (off == NULL_RTX)
3094 *offset = 0;
3095 else if (CONST_INT_P (off))
3096 *offset = INTVAL (off);
3097 else if (CONST_WIDE_INT_P (off))
3098 /* The offset will be cut down to 12 bits anyway, so just take
3099 the lowest-order chunk of the wide int. */
3100 *offset = CONST_WIDE_INT_ELT (off, 0);
3101 else
3102 gcc_unreachable ();
3104 if (base)
3105 *base = op;
3107 return true;
3111 /* Return true if OP is a valid address without index. */
3113 bool
3114 s390_legitimate_address_without_index_p (rtx op)
3116 struct s390_address addr;
3118 if (!s390_decompose_address (XEXP (op, 0), &addr))
3119 return false;
3120 if (addr.indx)
3121 return false;
3123 return true;
3127 /* Return TRUE if ADDR is an operand valid for a load/store relative
3128 instruction. Be aware that the alignment of the operand needs to
3129 be checked separately.
3130 Valid addresses are single references or a sum of a reference and a
3131 constant integer. Return these parts in SYMREF and ADDEND. You can
3132 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3133 values. Literal pool references are *not* considered symbol
3134 references. */
3136 static bool
3137 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3139 HOST_WIDE_INT tmpaddend = 0;
3141 if (GET_CODE (addr) == CONST)
3142 addr = XEXP (addr, 0);
3144 if (GET_CODE (addr) == PLUS)
3146 if (!CONST_INT_P (XEXP (addr, 1)))
3147 return false;
3149 tmpaddend = INTVAL (XEXP (addr, 1));
3150 addr = XEXP (addr, 0);
3153 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3154 || (GET_CODE (addr) == UNSPEC
3155 && (XINT (addr, 1) == UNSPEC_GOTENT
3156 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3158 if (symref)
3159 *symref = addr;
3160 if (addend)
3161 *addend = tmpaddend;
3163 return true;
3165 return false;
3168 /* Return true if the address in OP is valid for constraint letter C
3169 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3170 pool MEMs should be accepted. Only the Q, R, S, T constraint
3171 letters are allowed for C. */
3173 static int
3174 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3176 struct s390_address addr;
3177 bool decomposed = false;
3179 if (!address_operand (op, GET_MODE (op)))
3180 return 0;
3182 /* This check makes sure that no symbolic addresses (except literal
3183 pool references) are accepted by the R or T constraints. */
3184 if (s390_loadrelative_operand_p (op, NULL, NULL))
3185 return 0;
3187 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3188 if (!lit_pool_ok)
3190 if (!s390_decompose_address (op, &addr))
3191 return 0;
3192 if (addr.literal_pool)
3193 return 0;
3194 decomposed = true;
3197 /* With reload, we sometimes get intermediate address forms that are
3198 actually invalid as-is, but we need to accept them in the most
3199 generic cases below ('R' or 'T'), since reload will in fact fix
3200 them up. LRA behaves differently here; we never see such forms,
3201 but on the other hand, we need to strictly reject every invalid
3202 address form. Perform this check right up front. */
3203 if (lra_in_progress)
3205 if (!decomposed && !s390_decompose_address (op, &addr))
3206 return 0;
3207 decomposed = true;
3210 switch (c)
3212 case 'Q': /* no index short displacement */
3213 if (!decomposed && !s390_decompose_address (op, &addr))
3214 return 0;
3215 if (addr.indx)
3216 return 0;
3217 if (!s390_short_displacement (addr.disp))
3218 return 0;
3219 break;
3221 case 'R': /* with index short displacement */
3222 if (TARGET_LONG_DISPLACEMENT)
3224 if (!decomposed && !s390_decompose_address (op, &addr))
3225 return 0;
3226 if (!s390_short_displacement (addr.disp))
3227 return 0;
3229 /* Any invalid address here will be fixed up by reload,
3230 so accept it for the most generic constraint. */
3231 break;
3233 case 'S': /* no index long displacement */
3234 if (!decomposed && !s390_decompose_address (op, &addr))
3235 return 0;
3236 if (addr.indx)
3237 return 0;
3238 break;
3240 case 'T': /* with index long displacement */
3241 /* Any invalid address here will be fixed up by reload,
3242 so accept it for the most generic constraint. */
3243 break;
3245 default:
3246 return 0;
3248 return 1;
3252 /* Evaluates constraint strings described by the regular expression
3253 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3254 the constraint given in STR, and 0 otherwise. */
3257 s390_mem_constraint (const char *str, rtx op)
3259 char c = str[0];
3261 switch (c)
3263 case 'A':
3264 /* Check for offsettable variants of memory constraints. */
3265 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3266 return 0;
3267 if ((reload_completed || reload_in_progress)
3268 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3269 return 0;
3270 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3271 case 'B':
3272 /* Check for non-literal-pool variants of memory constraints. */
3273 if (!MEM_P (op))
3274 return 0;
3275 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3276 case 'Q':
3277 case 'R':
3278 case 'S':
3279 case 'T':
3280 if (GET_CODE (op) != MEM)
3281 return 0;
3282 return s390_check_qrst_address (c, XEXP (op, 0), true);
3283 case 'Y':
3284 /* Simply check for the basic form of a shift count. Reload will
3285 take care of making sure we have a proper base register. */
3286 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3287 return 0;
3288 break;
3289 case 'Z':
3290 return s390_check_qrst_address (str[1], op, true);
3291 default:
3292 return 0;
3294 return 1;
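/* Worked example (illustrative): the constraint string "AQ" accepts
   only an offsettable, non-volatile MEM whose address has no index
   register and a short (0..4095) displacement, while "BT" accepts any
   valid non-literal-pool address, possibly with index and long
   displacement.  */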
3298 /* Evaluates constraint strings starting with letter O. Input
3299 parameter C is the letter following the "O" in the constraint
3300 string. Returns 1 if VALUE meets the respective constraint and 0
3301 otherwise. */
3304 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3306 if (!TARGET_EXTIMM)
3307 return 0;
3309 switch (c)
3311 case 's':
3312 return trunc_int_for_mode (value, SImode) == value;
3314 case 'p':
3315 return value == 0
3316 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3318 case 'n':
3319 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3321 default:
3322 gcc_unreachable ();
3327 /* Evaluates constraint strings starting with letter N. Parameter STR
3328 contains the letters following letter "N" in the constraint string.
3329 Returns true if VALUE matches the constraint. */
3332 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3334 machine_mode mode, part_mode;
3335 int def;
3336 int part, part_goal;
3339 if (str[0] == 'x')
3340 part_goal = -1;
3341 else
3342 part_goal = str[0] - '0';
3344 switch (str[1])
3346 case 'Q':
3347 part_mode = QImode;
3348 break;
3349 case 'H':
3350 part_mode = HImode;
3351 break;
3352 case 'S':
3353 part_mode = SImode;
3354 break;
3355 default:
3356 return 0;
3359 switch (str[2])
3361 case 'H':
3362 mode = HImode;
3363 break;
3364 case 'S':
3365 mode = SImode;
3366 break;
3367 case 'D':
3368 mode = DImode;
3369 break;
3370 default:
3371 return 0;
3374 switch (str[3])
3376 case '0':
3377 def = 0;
3378 break;
3379 case 'F':
3380 def = -1;
3381 break;
3382 default:
3383 return 0;
3386 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3387 return 0;
3389 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3390 if (part < 0)
3391 return 0;
3392 if (part_goal != -1 && part_goal != part)
3393 return 0;
3395 return 1;
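/* Worked example (illustrative): the string "xHD0" following "N" asks
   whether VALUE, viewed as a DImode constant, has exactly one HImode
   part different from 0.  0x0000dead00000000 satisfies this;
   0xdead00000000beef does not, since two parts are non-zero.  */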
3399 /* Returns true if the input parameter VALUE is a float zero. */
3402 s390_float_const_zero_p (rtx value)
3404 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3405 && value == CONST0_RTX (GET_MODE (value)));
3408 /* Implement TARGET_REGISTER_MOVE_COST. */
3410 static int
3411 s390_register_move_cost (machine_mode mode,
3412 reg_class_t from, reg_class_t to)
3414 /* On s390, copy between fprs and gprs is expensive. */
3416 /* It becomes somewhat faster having ldgr/lgdr. */
3417 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3419 /* ldgr is single cycle. */
3420 if (reg_classes_intersect_p (from, GENERAL_REGS)
3421 && reg_classes_intersect_p (to, FP_REGS))
3422 return 1;
3423 /* lgdr needs 3 cycles. */
3424 if (reg_classes_intersect_p (to, GENERAL_REGS)
3425 && reg_classes_intersect_p (from, FP_REGS))
3426 return 3;
3429 /* Otherwise copying is done via memory. */
3430 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3431 && reg_classes_intersect_p (to, FP_REGS))
3432 || (reg_classes_intersect_p (from, FP_REGS)
3433 && reg_classes_intersect_p (to, GENERAL_REGS)))
3434 return 10;
3436 return 1;
3439 /* Implement TARGET_MEMORY_MOVE_COST. */
3441 static int
3442 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3443 reg_class_t rclass ATTRIBUTE_UNUSED,
3444 bool in ATTRIBUTE_UNUSED)
3446 return 2;
3449 /* Compute a (partial) cost for rtx X. Return true if the complete
3450 cost has been computed, and false if subexpressions should be
3451 scanned. In either case, *TOTAL contains the cost result. The
3452 initial value of *TOTAL is the default value computed by
3453 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3454 code of the superexpression of x. */
3456 static bool
3457 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3458 int opno ATTRIBUTE_UNUSED,
3459 int *total, bool speed ATTRIBUTE_UNUSED)
3461 int code = GET_CODE (x);
3462 switch (code)
3464 case CONST:
3465 case CONST_INT:
3466 case LABEL_REF:
3467 case SYMBOL_REF:
3468 case CONST_DOUBLE:
3469 case CONST_WIDE_INT:
3470 case MEM:
3471 *total = 0;
3472 return true;
3474 case SET:
3476 /* Without this a conditional move instruction would be
3477 accounted as 3 * COSTS_N_INSNS (set, if_then_else,
3478 comparison operator). That's a bit pessimistic. */
3480 if (!TARGET_Z196 || GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3481 return false;
3483 rtx cond = XEXP (SET_SRC (x), 0);
3485 if (!CC_REG_P (XEXP (cond, 0)) || !CONST_INT_P (XEXP (cond, 1)))
3486 return false;
3488 /* It is going to be a load/store on condition. Make it
3489 slightly more expensive than a normal load. */
3490 *total = COSTS_N_INSNS (1) + 1;
3492 rtx dst = SET_DEST (x);
3493 rtx then = XEXP (SET_SRC (x), 1);
3494 rtx els = XEXP (SET_SRC (x), 2);
3496 /* It is a real IF-THEN-ELSE. An additional move will be
3497 needed to implement that. */
3498 if (reload_completed
3499 && !rtx_equal_p (dst, then)
3500 && !rtx_equal_p (dst, els))
3501 *total += COSTS_N_INSNS (1) / 2;
3503 /* A minor penalty for constants we cannot directly handle. */
3504 if ((CONST_INT_P (then) || CONST_INT_P (els))
3505 && (!TARGET_Z13 || MEM_P (dst)
3506 || (CONST_INT_P (then) && !satisfies_constraint_K (then))
3507 || (CONST_INT_P (els) && !satisfies_constraint_K (els))))
3508 *total += COSTS_N_INSNS (1) / 2;
3510 /* A store on condition can only handle register src operands. */
3511 if (MEM_P (dst) && (!REG_P (then) || !REG_P (els)))
3512 *total += COSTS_N_INSNS (1) / 2;
3514 return true;
3516 case IOR:
3517 /* risbg */
3518 if (GET_CODE (XEXP (x, 0)) == AND
3519 && GET_CODE (XEXP (x, 1)) == ASHIFT
3520 && REG_P (XEXP (XEXP (x, 0), 0))
3521 && REG_P (XEXP (XEXP (x, 1), 0))
3522 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3523 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3524 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3525 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3527 *total = COSTS_N_INSNS (2);
3528 return true;
3531 /* ~AND on a 128 bit mode. This can be done using a vector
3532 instruction. */
3533 if (TARGET_VXE
3534 && GET_CODE (XEXP (x, 0)) == NOT
3535 && GET_CODE (XEXP (x, 1)) == NOT
3536 && REG_P (XEXP (XEXP (x, 0), 0))
3537 && REG_P (XEXP (XEXP (x, 1), 0))
3538 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3539 && s390_hard_regno_mode_ok (VR0_REGNUM,
3540 GET_MODE (XEXP (XEXP (x, 0), 0))))
3542 *total = COSTS_N_INSNS (1);
3543 return true;
3545 /* fallthrough */
3546 case ASHIFT:
3547 case ASHIFTRT:
3548 case LSHIFTRT:
3549 case ROTATE:
3550 case ROTATERT:
3551 case AND:
3552 case XOR:
3553 case NEG:
3554 case NOT:
3555 *total = COSTS_N_INSNS (1);
3556 return false;
3558 case PLUS:
3559 case MINUS:
3560 *total = COSTS_N_INSNS (1);
3561 return false;
3563 case MULT:
3564 switch (mode)
3566 case E_SImode:
3568 rtx left = XEXP (x, 0);
3569 rtx right = XEXP (x, 1);
3570 if (GET_CODE (right) == CONST_INT
3571 && CONST_OK_FOR_K (INTVAL (right)))
3572 *total = s390_cost->mhi;
3573 else if (GET_CODE (left) == SIGN_EXTEND)
3574 *total = s390_cost->mh;
3575 else
3576 *total = s390_cost->ms; /* msr, ms, msy */
3577 break;
3579 case E_DImode:
3581 rtx left = XEXP (x, 0);
3582 rtx right = XEXP (x, 1);
3583 if (TARGET_ZARCH)
3585 if (GET_CODE (right) == CONST_INT
3586 && CONST_OK_FOR_K (INTVAL (right)))
3587 *total = s390_cost->mghi;
3588 else if (GET_CODE (left) == SIGN_EXTEND)
3589 *total = s390_cost->msgf;
3590 else
3591 *total = s390_cost->msg; /* msgr, msg */
3593 else /* TARGET_31BIT */
3595 if (GET_CODE (left) == SIGN_EXTEND
3596 && GET_CODE (right) == SIGN_EXTEND)
3597 /* mulsidi case: mr, m */
3598 *total = s390_cost->m;
3599 else if (GET_CODE (left) == ZERO_EXTEND
3600 && GET_CODE (right) == ZERO_EXTEND
3601 && TARGET_CPU_ZARCH)
3602 /* umulsidi case: ml, mlr */
3603 *total = s390_cost->ml;
3604 else
3605 /* Complex calculation is required. */
3606 *total = COSTS_N_INSNS (40);
3608 break;
3610 case E_SFmode:
3611 case E_DFmode:
3612 *total = s390_cost->mult_df;
3613 break;
3614 case E_TFmode:
3615 *total = s390_cost->mxbr;
3616 break;
3617 default:
3618 return false;
3620 return false;
3622 case FMA:
3623 switch (mode)
3625 case E_DFmode:
3626 *total = s390_cost->madbr;
3627 break;
3628 case E_SFmode:
3629 *total = s390_cost->maebr;
3630 break;
3631 default:
3632 return false;
3634 /* A negation in the third argument is free: FMSUB. */
3635 if (GET_CODE (XEXP (x, 2)) == NEG)
3637 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3638 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3639 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3640 return true;
3642 return false;
3644 case UDIV:
3645 case UMOD:
3646 if (mode == TImode) /* 128 bit division */
3647 *total = s390_cost->dlgr;
3648 else if (mode == DImode)
3650 rtx right = XEXP (x, 1);
3651 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3652 *total = s390_cost->dlr;
3653 else /* 64 by 64 bit division */
3654 *total = s390_cost->dlgr;
3656 else if (mode == SImode) /* 32 bit division */
3657 *total = s390_cost->dlr;
3658 return false;
3660 case DIV:
3661 case MOD:
3662 if (mode == DImode)
3664 rtx right = XEXP (x, 1);
3665 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3666 if (TARGET_ZARCH)
3667 *total = s390_cost->dsgfr;
3668 else
3669 *total = s390_cost->dr;
3670 else /* 64 by 64 bit division */
3671 *total = s390_cost->dsgr;
3673 else if (mode == SImode) /* 32 bit division */
3674 *total = s390_cost->dlr;
3675 else if (mode == SFmode)
3677 *total = s390_cost->debr;
3679 else if (mode == DFmode)
3681 *total = s390_cost->ddbr;
3683 else if (mode == TFmode)
3685 *total = s390_cost->dxbr;
3687 return false;
3689 case SQRT:
3690 if (mode == SFmode)
3691 *total = s390_cost->sqebr;
3692 else if (mode == DFmode)
3693 *total = s390_cost->sqdbr;
3694 else /* TFmode */
3695 *total = s390_cost->sqxbr;
3696 return false;
3698 case SIGN_EXTEND:
3699 case ZERO_EXTEND:
3700 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3701 || outer_code == PLUS || outer_code == MINUS
3702 || outer_code == COMPARE)
3703 *total = 0;
3704 return false;
3706 case COMPARE:
3707 *total = COSTS_N_INSNS (1);
3708 if (GET_CODE (XEXP (x, 0)) == AND
3709 && GET_CODE (XEXP (x, 1)) == CONST_INT
3710 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3712 rtx op0 = XEXP (XEXP (x, 0), 0);
3713 rtx op1 = XEXP (XEXP (x, 0), 1);
3714 rtx op2 = XEXP (x, 1);
3716 if (memory_operand (op0, GET_MODE (op0))
3717 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3718 return true;
3719 if (register_operand (op0, GET_MODE (op0))
3720 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3721 return true;
3723 return false;
3725 default:
3726 return false;
3730 /* Return the cost of an address rtx ADDR. */
3732 static int
3733 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3734 addr_space_t as ATTRIBUTE_UNUSED,
3735 bool speed ATTRIBUTE_UNUSED)
3737 struct s390_address ad;
3738 if (!s390_decompose_address (addr, &ad))
3739 return 1000;
3741 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3744 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3745 static int
3746 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3747 tree vectype,
3748 int misalign ATTRIBUTE_UNUSED)
3750 switch (type_of_cost)
3752 case scalar_stmt:
3753 case scalar_load:
3754 case scalar_store:
3755 case vector_stmt:
3756 case vector_load:
3757 case vector_store:
3758 case vector_gather_load:
3759 case vector_scatter_store:
3760 case vec_to_scalar:
3761 case scalar_to_vec:
3762 case cond_branch_not_taken:
3763 case vec_perm:
3764 case vec_promote_demote:
3765 case unaligned_load:
3766 case unaligned_store:
3767 return 1;
3769 case cond_branch_taken:
3770 return 3;
3772 case vec_construct:
3773 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3775 default:
3776 gcc_unreachable ();
3780 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3781 otherwise return 0. */
3784 tls_symbolic_operand (rtx op)
3786 if (GET_CODE (op) != SYMBOL_REF)
3787 return 0;
3788 return SYMBOL_REF_TLS_MODEL (op);
3791 /* Split DImode access register reference REG (on 64-bit) into its constituent
3792 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3793 gen_highpart cannot be used as they assume all registers are word-sized,
3794 while our access registers have only half that size. */
3796 void
3797 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3799 gcc_assert (TARGET_64BIT);
3800 gcc_assert (ACCESS_REG_P (reg));
3801 gcc_assert (GET_MODE (reg) == DImode);
3802 gcc_assert (!(REGNO (reg) & 1));
3804 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3805 *hi = gen_rtx_REG (SImode, REGNO (reg));
3808 /* Return true if OP contains a symbol reference */
3810 bool
3811 symbolic_reference_mentioned_p (rtx op)
3813 const char *fmt;
3814 int i;
3816 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3817 return 1;
3819 fmt = GET_RTX_FORMAT (GET_CODE (op));
3820 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3822 if (fmt[i] == 'E')
3824 int j;
3826 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3827 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3828 return 1;
3831 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3832 return 1;
3835 return 0;
3838 /* Return true if OP contains a reference to a thread-local symbol. */
3840 bool
3841 tls_symbolic_reference_mentioned_p (rtx op)
3843 const char *fmt;
3844 int i;
3846 if (GET_CODE (op) == SYMBOL_REF)
3847 return tls_symbolic_operand (op);
3849 fmt = GET_RTX_FORMAT (GET_CODE (op));
3850 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3852 if (fmt[i] == 'E')
3854 int j;
3856 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3857 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3858 return true;
3861 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3862 return true;
3865 return false;
3869 /* Return true if OP is a legitimate general operand when
3870 generating PIC code. It is given that flag_pic is on
3871 and that OP satisfies CONSTANT_P. */
3874 legitimate_pic_operand_p (rtx op)
3876 /* Accept all non-symbolic constants. */
3877 if (!SYMBOLIC_CONST (op))
3878 return 1;
3880 /* Reject everything else; must be handled
3881 via emit_symbolic_move. */
3882 return 0;
3885 /* Returns true if the constant value OP is a legitimate general operand.
3886 It is given that OP satisfies CONSTANT_P. */
3888 static bool
3889 s390_legitimate_constant_p (machine_mode mode, rtx op)
3891 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3893 if (GET_MODE_SIZE (mode) != 16)
3894 return 0;
3896 if (!satisfies_constraint_j00 (op)
3897 && !satisfies_constraint_jm1 (op)
3898 && !satisfies_constraint_jKK (op)
3899 && !satisfies_constraint_jxx (op)
3900 && !satisfies_constraint_jyy (op))
3901 return 0;
3904 /* Accept all non-symbolic constants. */
3905 if (!SYMBOLIC_CONST (op))
3906 return 1;
3908 /* Accept immediate LARL operands. */
3909 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3910 return 1;
3912 /* Thread-local symbols are never legal constants. This is
3913 so that emit_call knows that computing such addresses
3914 might require a function call. */
3915 if (TLS_SYMBOLIC_CONST (op))
3916 return 0;
3918 /* In the PIC case, symbolic constants must *not* be
3919 forced into the literal pool. We accept them here,
3920 so that they will be handled by emit_symbolic_move. */
3921 if (flag_pic)
3922 return 1;
3924 /* All remaining non-PIC symbolic constants are
3925 forced into the literal pool. */
3926 return 0;
3929 /* Determine if it's legal to put X into the constant pool. This
3930 is not possible if X contains the address of a symbol that is
3931 not constant (TLS) or not known at final link time (PIC). */
3933 static bool
3934 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3936 switch (GET_CODE (x))
3938 case CONST_INT:
3939 case CONST_DOUBLE:
3940 case CONST_WIDE_INT:
3941 case CONST_VECTOR:
3942 /* Accept all non-symbolic constants. */
3943 return false;
3945 case LABEL_REF:
3946 /* Labels are OK iff we are non-PIC. */
3947 return flag_pic != 0;
3949 case SYMBOL_REF:
3950 /* 'Naked' TLS symbol references are never OK,
3951 non-TLS symbols are OK iff we are non-PIC. */
3952 if (tls_symbolic_operand (x))
3953 return true;
3954 else
3955 return flag_pic != 0;
3957 case CONST:
3958 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3959 case PLUS:
3960 case MINUS:
3961 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3962 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3964 case UNSPEC:
3965 switch (XINT (x, 1))
3967 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3968 case UNSPEC_LTREL_OFFSET:
3969 case UNSPEC_GOT:
3970 case UNSPEC_GOTOFF:
3971 case UNSPEC_PLTOFF:
3972 case UNSPEC_TLSGD:
3973 case UNSPEC_TLSLDM:
3974 case UNSPEC_NTPOFF:
3975 case UNSPEC_DTPOFF:
3976 case UNSPEC_GOTNTPOFF:
3977 case UNSPEC_INDNTPOFF:
3978 return false;
3980 /* If the literal pool shares the code section, execute template
3981 placeholders may be put into the pool as well. */
3982 case UNSPEC_INSN:
3983 return TARGET_CPU_ZARCH;
3985 default:
3986 return true;
3988 break;
3990 default:
3991 gcc_unreachable ();
3995 /* Returns true if the constant value OP is a legitimate general
3996 operand during and after reload. The difference to
3997 legitimate_constant_p is that this function will not accept
3998 a constant that would need to be forced to the literal pool
3999 before it can be used as operand.
4000 This function accepts all constants which can be loaded directly
4001 into a GPR. */
4003 bool
4004 legitimate_reload_constant_p (rtx op)
4006 /* Accept la(y) operands. */
4007 if (GET_CODE (op) == CONST_INT
4008 && DISP_IN_RANGE (INTVAL (op)))
4009 return true;
4011 /* Accept l(g)hi/l(g)fi operands. */
4012 if (GET_CODE (op) == CONST_INT
4013 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
4014 return true;
4016 /* Accept lliXX operands. */
4017 if (TARGET_ZARCH
4018 && GET_CODE (op) == CONST_INT
4019 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
4020 && s390_single_part (op, word_mode, HImode, 0) >= 0)
4021 return true;
4023 if (TARGET_EXTIMM
4024 && GET_CODE (op) == CONST_INT
4025 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
4026 && s390_single_part (op, word_mode, SImode, 0) >= 0)
4027 return true;
4029 /* Accept larl operands. */
4030 if (TARGET_CPU_ZARCH
4031 && larl_operand (op, VOIDmode))
4032 return true;
4034 /* Accept floating-point zero operands that fit into a single GPR. */
4035 if (GET_CODE (op) == CONST_DOUBLE
4036 && s390_float_const_zero_p (op)
4037 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
4038 return true;
4040 /* Accept double-word operands that can be split. */
4041 if (GET_CODE (op) == CONST_WIDE_INT
4042 || (GET_CODE (op) == CONST_INT
4043 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
4045 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
4046 rtx hi = operand_subword (op, 0, 0, dword_mode);
4047 rtx lo = operand_subword (op, 1, 0, dword_mode);
4048 return legitimate_reload_constant_p (hi)
4049 && legitimate_reload_constant_p (lo);
4052 /* Everything else cannot be handled without reload. */
4053 return false;
4056 /* Returns true if the constant value OP is a legitimate fp operand
4057 during and after reload.
4058 This function accepts all constants which can be loaded directly
4059 into an FPR. */
4061 static bool
4062 legitimate_reload_fp_constant_p (rtx op)
4064 /* Accept floating-point zero operands if the load zero instruction
4065 can be used. Prior to z196 the load fp zero instruction caused a
4066 performance penalty if the result is used as BFP number. */
4067 if (TARGET_Z196
4068 && GET_CODE (op) == CONST_DOUBLE
4069 && s390_float_const_zero_p (op))
4070 return true;
4072 return false;
4075 /* Returns true if the constant value OP is a legitimate vector operand
4076 during and after reload.
4077 This function accepts all constants which can be loaded directly
4078 into a VR. */
4080 static bool
4081 legitimate_reload_vector_constant_p (rtx op)
4083 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
4084 && (satisfies_constraint_j00 (op)
4085 || satisfies_constraint_jm1 (op)
4086 || satisfies_constraint_jKK (op)
4087 || satisfies_constraint_jxx (op)
4088 || satisfies_constraint_jyy (op)))
4089 return true;
4091 return false;
4094 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
4095 return the class of reg to actually use. */
4097 static reg_class_t
4098 s390_preferred_reload_class (rtx op, reg_class_t rclass)
4100 switch (GET_CODE (op))
4102 /* Constants we cannot reload into general registers
4103 must be forced into the literal pool. */
4104 case CONST_VECTOR:
4105 case CONST_DOUBLE:
4106 case CONST_INT:
4107 case CONST_WIDE_INT:
4108 if (reg_class_subset_p (GENERAL_REGS, rclass)
4109 && legitimate_reload_constant_p (op))
4110 return GENERAL_REGS;
4111 else if (reg_class_subset_p (ADDR_REGS, rclass)
4112 && legitimate_reload_constant_p (op))
4113 return ADDR_REGS;
4114 else if (reg_class_subset_p (FP_REGS, rclass)
4115 && legitimate_reload_fp_constant_p (op))
4116 return FP_REGS;
4117 else if (reg_class_subset_p (VEC_REGS, rclass)
4118 && legitimate_reload_vector_constant_p (op))
4119 return VEC_REGS;
4121 return NO_REGS;
4123 /* If a symbolic constant or a PLUS is reloaded,
4124 it is most likely being used as an address, so
4125 prefer ADDR_REGS. If RCLASS is not a superset
4126 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4127 case CONST:
4128 /* Symrefs cannot be pushed into the literal pool with -fPIC
4129 so we *MUST NOT* return NO_REGS for these cases
4130 (s390_cannot_force_const_mem will return true).
4132 On the other hand we MUST return NO_REGS for symrefs with
4133 invalid addend which might have been pushed to the literal
4134 pool (no -fPIC). Usually we would expect them to be
4135 handled via secondary reload but this does not happen if
4136 they are used as literal pool slot replacement in reload
4137 inheritance (see emit_input_reload_insns). */
4138 if (TARGET_CPU_ZARCH
4139 && GET_CODE (XEXP (op, 0)) == PLUS
4140 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4141 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4143 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4144 return ADDR_REGS;
4145 else
4146 return NO_REGS;
4148 /* fallthrough */
4149 case LABEL_REF:
4150 case SYMBOL_REF:
4151 if (!legitimate_reload_constant_p (op))
4152 return NO_REGS;
4153 /* fallthrough */
4154 case PLUS:
4155 /* load address will be used. */
4156 if (reg_class_subset_p (ADDR_REGS, rclass))
4157 return ADDR_REGS;
4158 else
4159 return NO_REGS;
4161 default:
4162 break;
4165 return rclass;
4168 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4169 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4170 aligned. */
4172 bool
4173 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4175 HOST_WIDE_INT addend;
4176 rtx symref;
4178 /* The "required alignment" might be 0 (e.g. for certain structs
4179 accessed via BLKmode). Early abort in this case, as well as when
4180 an alignment > 8 is required. */
4181 if (alignment < 2 || alignment > 8)
4182 return false;
4184 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4185 return false;
4187 if (addend & (alignment - 1))
4188 return false;
4190 if (GET_CODE (symref) == SYMBOL_REF)
4192 /* We have load-relative instructions for 2-byte, 4-byte, and
4193 8-byte alignment so allow only these. */
4194 switch (alignment)
4196 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4197 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4198 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4199 default: return false;
4203 if (GET_CODE (symref) == UNSPEC
4204 && alignment <= UNITS_PER_LONG)
4205 return true;
4207 return false;
4210 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4211 operand, SCRATCH is used to load the even part of the address and
4212 one is added afterwards. */
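/* E.g. for the odd address sym+5 the emitted sequence is roughly:
     larl  scratch, sym+4
     la    reg, 1(scratch)  */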
4214 void
4215 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4217 HOST_WIDE_INT addend;
4218 rtx symref;
4220 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4221 gcc_unreachable ();
4223 if (!(addend & 1))
4224 /* Easy case. The addend is even so larl will do fine. */
4225 emit_move_insn (reg, addr);
4226 else
4228 /* We can leave the scratch register untouched if the target
4229 register is a valid base register. */
4230 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4231 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4232 scratch = reg;
4234 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4235 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4237 if (addend != 1)
4238 emit_move_insn (scratch,
4239 gen_rtx_CONST (Pmode,
4240 gen_rtx_PLUS (Pmode, symref,
4241 GEN_INT (addend - 1))));
4242 else
4243 emit_move_insn (scratch, symref);
4245 /* Increment the address using la in order to avoid clobbering cc. */
4246 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4250 /* Generate what is necessary to move between REG and MEM using
4251 SCRATCH. The direction is given by TOMEM. */
4253 void
4254 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4256 /* Reload might have pulled a constant out of the literal pool.
4257 Force it back in. */
4258 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4259 || GET_CODE (mem) == CONST_WIDE_INT
4260 || GET_CODE (mem) == CONST_VECTOR
4261 || GET_CODE (mem) == CONST)
4262 mem = force_const_mem (GET_MODE (reg), mem);
4264 gcc_assert (MEM_P (mem));
4266 /* For a load from memory we can leave the scratch register
4267 untouched if the target register is a valid base register. */
4268 if (!tomem
4269 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4270 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4271 && GET_MODE (reg) == GET_MODE (scratch))
4272 scratch = reg;
4274 /* Load address into scratch register. Since we can't have a
4275 secondary reload for a secondary reload we have to cover the case
4276 where larl would need a secondary reload here as well. */
4277 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4279 /* Now we can use a standard load/store to do the move. */
4280 if (tomem)
4281 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4282 else
4283 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4286 /* Inform reload about cases where moving X with a mode MODE to a register in
4287 RCLASS requires an extra scratch or immediate register. Return the class
4288 needed for the immediate register. */
4290 static reg_class_t
4291 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4292 machine_mode mode, secondary_reload_info *sri)
4294 enum reg_class rclass = (enum reg_class) rclass_i;
4296 /* Intermediate register needed. */
4297 if (reg_classes_intersect_p (CC_REGS, rclass))
4298 return GENERAL_REGS;
4300 if (TARGET_VX)
4302 /* The vst/vl vector move instructions allow only short
4303 displacements. */
4304 if (MEM_P (x)
4305 && GET_CODE (XEXP (x, 0)) == PLUS
4306 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4307 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4308 && reg_class_subset_p (rclass, VEC_REGS)
4309 && (!reg_class_subset_p (rclass, FP_REGS)
4310 || (GET_MODE_SIZE (mode) > 8
4311 && s390_class_max_nregs (FP_REGS, mode) == 1)))
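/* The reload*_la_* patterns are meant to compute the address into the
   scratch register first so the vector load/store itself only needs an
   in-range displacement. */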
4313 if (in_p)
4314 sri->icode = (TARGET_64BIT ?
4315 CODE_FOR_reloaddi_la_in :
4316 CODE_FOR_reloadsi_la_in);
4317 else
4318 sri->icode = (TARGET_64BIT ?
4319 CODE_FOR_reloaddi_la_out :
4320 CODE_FOR_reloadsi_la_out);
4324 if (TARGET_Z10)
4326 HOST_WIDE_INT offset;
4327 rtx symref;
4329 /* On z10 several optimizer steps may generate larl operands with
4330 an odd addend. */
4331 if (in_p
4332 && s390_loadrelative_operand_p (x, &symref, &offset)
4333 && mode == Pmode
4334 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4335 && (offset & 1) == 1)
4336 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4337 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4339 /* Handle all the (mem (symref)) accesses we cannot use the z10
4340 instructions for. */
4341 if (MEM_P (x)
4342 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4343 && (mode == QImode
4344 || !reg_class_subset_p (rclass, GENERAL_REGS)
4345 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4346 || !s390_check_symref_alignment (XEXP (x, 0),
4347 GET_MODE_SIZE (mode))))
4349 #define __SECONDARY_RELOAD_CASE(M,m) \
4350 case E_##M##mode: \
4351 if (TARGET_64BIT) \
4352 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4353 CODE_FOR_reload##m##di_tomem_z10; \
4354 else \
4355 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4356 CODE_FOR_reload##m##si_tomem_z10; \
4357 break;
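/* E.g. __SECONDARY_RELOAD_CASE (SI, si) expands to a case label that
   selects CODE_FOR_reloadsidi_toreg_z10 / CODE_FOR_reloadsidi_tomem_z10
   on 64 bit and CODE_FOR_reloadsisi_toreg_z10 /
   CODE_FOR_reloadsisi_tomem_z10 on 31 bit, depending on IN_P. */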
4359 switch (GET_MODE (x))
4361 __SECONDARY_RELOAD_CASE (QI, qi);
4362 __SECONDARY_RELOAD_CASE (HI, hi);
4363 __SECONDARY_RELOAD_CASE (SI, si);
4364 __SECONDARY_RELOAD_CASE (DI, di);
4365 __SECONDARY_RELOAD_CASE (TI, ti);
4366 __SECONDARY_RELOAD_CASE (SF, sf);
4367 __SECONDARY_RELOAD_CASE (DF, df);
4368 __SECONDARY_RELOAD_CASE (TF, tf);
4369 __SECONDARY_RELOAD_CASE (SD, sd);
4370 __SECONDARY_RELOAD_CASE (DD, dd);
4371 __SECONDARY_RELOAD_CASE (TD, td);
4372 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4373 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4374 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4375 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4376 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4377 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4378 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4379 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4380 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4381 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4382 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4383 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4384 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4385 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4386 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4387 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4388 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4389 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4390 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4391 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4392 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4393 default:
4394 gcc_unreachable ();
4396 #undef __SECONDARY_RELOAD_CASE
4400 /* We need a scratch register when loading a PLUS expression which
4401 is not a legitimate operand of the LOAD ADDRESS instruction. */
4402 /* LRA can deal with the transformation of a plus op very well -- so we
4403 don't need to prompt LRA in this case. */
4404 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4405 sri->icode = (TARGET_64BIT ?
4406 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4408 /* When performing a multiword move from or to memory we have to make sure
4409 the second chunk in memory is addressable without causing a displacement
4410 overflow. If that would be the case we calculate the address in
4411 a scratch register. */
4412 if (MEM_P (x)
4413 && GET_CODE (XEXP (x, 0)) == PLUS
4414 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4415 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4416 + GET_MODE_SIZE (mode) - 1))
4418 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4419 in an s_operand address since we may fall back to lm/stm. So we only
4420 have to care about overflows in the b+i+d case. */
4421 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4422 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4423 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4424 /* For FP_REGS no lm/stm is available so this check is triggered
4425 for displacement overflows in b+i+d and b+d like addresses. */
4426 || (reg_classes_intersect_p (FP_REGS, rclass)
4427 && s390_class_max_nregs (FP_REGS, mode) > 1))
4429 if (in_p)
4430 sri->icode = (TARGET_64BIT ?
4431 CODE_FOR_reloaddi_la_in :
4432 CODE_FOR_reloadsi_la_in);
4433 else
4434 sri->icode = (TARGET_64BIT ?
4435 CODE_FOR_reloaddi_la_out :
4436 CODE_FOR_reloadsi_la_out);
4440 /* A scratch address register is needed when a symbolic constant is
4441 copied to r0 when compiling with -fPIC. In other cases the target
4442 register might be used as a temporary (see legitimize_pic_address). */
4443 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4444 sri->icode = (TARGET_64BIT ?
4445 CODE_FOR_reloaddi_PIC_addr :
4446 CODE_FOR_reloadsi_PIC_addr);
4448 /* Either scratch or no register needed. */
4449 return NO_REGS;
4452 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
4454 We need secondary memory to move data between GPRs and FPRs.
4456 - With DFP the ldgr/lgdr instructions are available. Due to the
4457 different alignment we cannot use them for SFmode. For 31 bit a
4458 64 bit value in a GPR would be a register pair so here we still
4459 need to go via memory.
4461 - With z13 we can do the SF/SImode moves with vlgvf. Due to the
4462 overlapping of FPRs and VRs we still disallow TF/TD modes to be
4463 in full VRs, so these moves also go via memory on z13, just as
4464 before.
4466 FIXME: Should we try splitting it into two vlgvg's/vlvg's instead? */
4468 static bool
4469 s390_secondary_memory_needed (machine_mode mode,
4470 reg_class_t class1, reg_class_t class2)
4472 return (((reg_classes_intersect_p (class1, VEC_REGS)
4473 && reg_classes_intersect_p (class2, GENERAL_REGS))
4474 || (reg_classes_intersect_p (class1, GENERAL_REGS)
4475 && reg_classes_intersect_p (class2, VEC_REGS)))
4476 && (!TARGET_DFP || !TARGET_64BIT || GET_MODE_SIZE (mode) != 8)
4477 && (!TARGET_VX || (SCALAR_FLOAT_MODE_P (mode)
4478 && GET_MODE_SIZE (mode) > 8)));
4481 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
4483 get_secondary_mem widens its argument to BITS_PER_WORD which loses on 64bit
4484 because the movsi and movsf patterns don't handle r/f moves. */
4486 static machine_mode
4487 s390_secondary_memory_needed_mode (machine_mode mode)
4489 if (GET_MODE_BITSIZE (mode) < 32)
4490 return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
4491 return mode;
4494 /* Generate code to load SRC, which is a PLUS that is not a
4495 legitimate operand for the LA instruction, into TARGET.
4496 SCRATCH may be used as scratch register. */
4498 void
4499 s390_expand_plus_operand (rtx target, rtx src,
4500 rtx scratch)
4502 rtx sum1, sum2;
4503 struct s390_address ad;
4505 /* src must be a PLUS; get its two operands. */
4506 gcc_assert (GET_CODE (src) == PLUS);
4507 gcc_assert (GET_MODE (src) == Pmode);
4509 /* Check if any of the two operands is already scheduled
4510 for replacement by reload. This can happen e.g. when
4511 float registers occur in an address. */
4512 sum1 = find_replacement (&XEXP (src, 0));
4513 sum2 = find_replacement (&XEXP (src, 1));
4514 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4516 /* If the address is already strictly valid, there's nothing to do. */
4517 if (!s390_decompose_address (src, &ad)
4518 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4519 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4521 /* Otherwise, one of the operands cannot be an address register;
4522 we reload its value into the scratch register. */
4523 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4525 emit_move_insn (scratch, sum1);
4526 sum1 = scratch;
4528 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4530 emit_move_insn (scratch, sum2);
4531 sum2 = scratch;
4534 /* According to the way these invalid addresses are generated
4535 in reload.c, it should never happen (at least on s390) that
4536 *neither* of the PLUS components, after find_replacements
4537 was applied, is an address register. */
4538 if (sum1 == scratch && sum2 == scratch)
4540 debug_rtx (src);
4541 gcc_unreachable ();
4544 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4547 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4548 is only ever performed on addresses, so we can mark the
4549 sum as legitimate for LA in any case. */
4550 s390_load_address (target, src);
4554 /* Return true if ADDR is a valid memory address.
4555 STRICT specifies whether strict register checking applies. */
4557 static bool
4558 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4560 struct s390_address ad;
4562 if (TARGET_Z10
4563 && larl_operand (addr, VOIDmode)
4564 && (mode == VOIDmode
4565 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4566 return true;
4568 if (!s390_decompose_address (addr, &ad))
4569 return false;
4571 if (strict)
4573 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4574 return false;
4576 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4577 return false;
4579 else
4581 if (ad.base
4582 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4583 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4584 return false;
4586 if (ad.indx
4587 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4588 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4589 return false;
4591 return true;
4594 /* Return true if OP is a valid operand for the LA instruction.
4595 In 31-bit, we need to prove that the result is used as an
4596 address, as LA performs only a 31-bit addition. */
4598 bool
4599 legitimate_la_operand_p (rtx op)
4601 struct s390_address addr;
4602 if (!s390_decompose_address (op, &addr))
4603 return false;
4605 return (TARGET_64BIT || addr.pointer);
4608 /* Return true if it is valid *and* preferable to use LA to
4609 compute the sum of OP1 and OP2. */
4611 bool
4612 preferred_la_operand_p (rtx op1, rtx op2)
4614 struct s390_address addr;
4616 if (op2 != const0_rtx)
4617 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4619 if (!s390_decompose_address (op1, &addr))
4620 return false;
4621 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4622 return false;
4623 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4624 return false;
4626 /* Avoid LA instructions with index register on z196; it is
4627 preferable to use regular add instructions when possible.
4628 Starting with zEC12 the la with index register is "uncracked"
4629 again. */
4630 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4631 return false;
4633 if (!TARGET_64BIT && !addr.pointer)
4634 return false;
4636 if (addr.pointer)
4637 return true;
4639 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4640 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4641 return true;
4643 return false;
4646 /* Emit a forced load-address operation to load SRC into DST.
4647 This will use the LOAD ADDRESS instruction even in situations
4648 where legitimate_la_operand_p (SRC) returns false. */
4650 void
4651 s390_load_address (rtx dst, rtx src)
4653 if (TARGET_64BIT)
4654 emit_move_insn (dst, src);
4655 else
4656 emit_insn (gen_force_la_31 (dst, src));
4659 /* Return true if it is OK to use SYMBOL_REF in a relative address. */
4661 bool
4662 s390_rel_address_ok_p (rtx symbol_ref)
4664 tree decl;
4666 if (symbol_ref == s390_got_symbol () || CONSTANT_POOL_ADDRESS_P (symbol_ref))
4667 return true;
4669 decl = SYMBOL_REF_DECL (symbol_ref);
4671 if (!flag_pic || SYMBOL_REF_LOCAL_P (symbol_ref))
4672 return (s390_pic_data_is_text_relative
4673 || (decl
4674 && TREE_CODE (decl) == FUNCTION_DECL));
4676 return false;
4679 /* Return a legitimate reference for ORIG (an address) using the
4680 register REG. If REG is 0, a new pseudo is generated.
4682 There are two types of references that must be handled:
4684 1. Global data references must load the address from the GOT, via
4685 the PIC reg. An insn is emitted to do this load, and the reg is
4686 returned.
4688 2. Static data references, constant pool addresses, and code labels
4689 compute the address as an offset from the GOT, whose base is in
4690 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4691 differentiate them from global data objects. The returned
4692 address is the PIC reg + an unspec constant.
4694 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
4695 reg also appears in the address. */
4697 rtx
4698 legitimize_pic_address (rtx orig, rtx reg)
4700 rtx addr = orig;
4701 rtx addend = const0_rtx;
4702 rtx new_rtx = orig;
4704 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4706 if (GET_CODE (addr) == CONST)
4707 addr = XEXP (addr, 0);
4709 if (GET_CODE (addr) == PLUS)
4711 addend = XEXP (addr, 1);
4712 addr = XEXP (addr, 0);
4715 if ((GET_CODE (addr) == LABEL_REF
4716 || (SYMBOL_REF_P (addr) && s390_rel_address_ok_p (addr))
4717 || (GET_CODE (addr) == UNSPEC &&
4718 (XINT (addr, 1) == UNSPEC_GOTENT
4719 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4720 && GET_CODE (addend) == CONST_INT)
4722 /* This can be locally addressed. */
4724 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4725 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4726 gen_rtx_CONST (Pmode, addr) : addr);
4728 if (TARGET_CPU_ZARCH
4729 && larl_operand (const_addr, VOIDmode)
4730 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4731 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4733 if (INTVAL (addend) & 1)
4735 /* LARL can't handle odd offsets, so emit a pair of LARL
4736 and LA. */
4737 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4739 if (!DISP_IN_RANGE (INTVAL (addend)))
4741 HOST_WIDE_INT even = INTVAL (addend) - 1;
4742 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4743 addr = gen_rtx_CONST (Pmode, addr);
4744 addend = const1_rtx;
4747 emit_move_insn (temp, addr);
4748 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4750 if (reg != 0)
4752 s390_load_address (reg, new_rtx);
4753 new_rtx = reg;
4756 else
4758 /* If the offset is even, we can just use LARL. This
4759 will happen automatically. */
4762 else
4764 /* No larl - Access local symbols relative to the GOT. */
4766 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4768 if (reload_in_progress || reload_completed)
4769 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4771 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4772 if (addend != const0_rtx)
4773 addr = gen_rtx_PLUS (Pmode, addr, addend);
4774 addr = gen_rtx_CONST (Pmode, addr);
4775 addr = force_const_mem (Pmode, addr);
4776 emit_move_insn (temp, addr);
4778 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4779 if (reg != 0)
4781 s390_load_address (reg, new_rtx);
4782 new_rtx = reg;
4786 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4788 /* A non-local symbol reference without addend.
4790 The symbol ref is wrapped into an UNSPEC to make sure the
4791 proper operand modifier (@GOT or @GOTENT) will be emitted.
4792 This will tell the linker to put the symbol into the GOT.
4794 Additionally the code dereferencing the GOT slot is emitted here.
4796 An addend to the symref needs to be added afterwards.
4797 legitimize_pic_address calls itself recursively to handle
4798 that case. So no need to do it here. */
4800 if (reg == 0)
4801 reg = gen_reg_rtx (Pmode);
4803 if (TARGET_Z10)
4805 /* Use load relative if possible.
4806 lgrl <target>, sym@GOTENT */
4807 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4808 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4809 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4811 emit_move_insn (reg, new_rtx);
4812 new_rtx = reg;
4814 else if (flag_pic == 1)
4816 /* Assume GOT offset is a valid displacement operand (< 4k
4817 or < 512k with z990). This is handled the same way in
4818 both 31- and 64-bit code (@GOT).
4819 lg <target>, sym@GOT(r12) */
4821 if (reload_in_progress || reload_completed)
4822 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4824 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4825 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4826 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4827 new_rtx = gen_const_mem (Pmode, new_rtx);
4828 emit_move_insn (reg, new_rtx);
4829 new_rtx = reg;
4831 else if (TARGET_CPU_ZARCH)
4833 /* If the GOT offset might be >= 4k, we determine the position
4834 of the GOT entry via a PC-relative LARL (@GOTENT).
4835 larl temp, sym@GOTENT
4836 lg <target>, 0(temp) */
4838 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4840 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4841 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4843 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4844 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4845 emit_move_insn (temp, new_rtx);
4847 new_rtx = gen_const_mem (Pmode, temp);
4848 emit_move_insn (reg, new_rtx);
4850 new_rtx = reg;
4852 else
4854 /* If the GOT offset might be >= 4k, we have to load it
4855 from the literal pool (@GOT).
4857 lg temp, lit-litbase(r13)
4858 lg <target>, 0(temp)
4859 lit: .long sym@GOT */
4861 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4863 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4864 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4866 if (reload_in_progress || reload_completed)
4867 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4869 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4870 addr = gen_rtx_CONST (Pmode, addr);
4871 addr = force_const_mem (Pmode, addr);
4872 emit_move_insn (temp, addr);
4874 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4875 new_rtx = gen_const_mem (Pmode, new_rtx);
4876 emit_move_insn (reg, new_rtx);
4877 new_rtx = reg;
4880 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4882 gcc_assert (XVECLEN (addr, 0) == 1);
4883 switch (XINT (addr, 1))
4885 /* These address symbols (or PLT slots) relative to the GOT
4886 (not GOT slots!). In general this will exceed the
4887 displacement range so these values belong in the literal
4888 pool. */
4889 case UNSPEC_GOTOFF:
4890 case UNSPEC_PLTOFF:
4891 new_rtx = force_const_mem (Pmode, orig);
4892 break;
4894 /* For -fPIC the GOT size might exceed the displacement
4895 range so make sure the value is in the literal pool. */
4896 case UNSPEC_GOT:
4897 if (flag_pic == 2)
4898 new_rtx = force_const_mem (Pmode, orig);
4899 break;
4901 /* For @GOTENT larl is used. This is handled like local
4902 symbol refs. */
4903 case UNSPEC_GOTENT:
4904 gcc_unreachable ();
4905 break;
4907 /* @PLT is OK as is on 64-bit, must be converted to
4908 GOT-relative @PLTOFF on 31-bit. */
4909 case UNSPEC_PLT:
4910 if (!TARGET_CPU_ZARCH)
4912 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4914 if (reload_in_progress || reload_completed)
4915 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4917 addr = XVECEXP (addr, 0, 0);
4918 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4919 UNSPEC_PLTOFF);
4920 if (addend != const0_rtx)
4921 addr = gen_rtx_PLUS (Pmode, addr, addend);
4922 addr = gen_rtx_CONST (Pmode, addr);
4923 addr = force_const_mem (Pmode, addr);
4924 emit_move_insn (temp, addr);
4926 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4927 if (reg != 0)
4929 s390_load_address (reg, new_rtx);
4930 new_rtx = reg;
4933 else
4934 /* On 64 bit larl can be used. This case is handled like
4935 local symbol refs. */
4936 gcc_unreachable ();
4937 break;
4939 /* Everything else cannot happen. */
4940 default:
4941 gcc_unreachable ();
4944 else if (addend != const0_rtx)
4946 /* Otherwise, compute the sum. */
4948 rtx base = legitimize_pic_address (addr, reg);
4949 new_rtx = legitimize_pic_address (addend,
4950 base == reg ? NULL_RTX : reg);
4951 if (GET_CODE (new_rtx) == CONST_INT)
4952 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4953 else
4955 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4957 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4958 new_rtx = XEXP (new_rtx, 1);
4960 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4963 if (GET_CODE (new_rtx) == CONST)
4964 new_rtx = XEXP (new_rtx, 0);
4965 new_rtx = force_operand (new_rtx, 0);
4968 return new_rtx;
4971 /* Load the thread pointer into a register. */
4973 static rtx
4974 s390_get_thread_pointer (void)
4976 rtx tp = gen_reg_rtx (Pmode);
4978 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4979 mark_reg_pointer (tp, BITS_PER_WORD);
4981 return tp;
4984 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4985 in s390_tls_symbol which always refers to __tls_get_offset.
4986 The returned offset is written to RESULT_REG and a USE rtx is
4987 generated for TLS_CALL. */
4989 static GTY(()) rtx s390_tls_symbol;
4991 static void
4992 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4994 rtx insn;
4996 if (!flag_pic)
4997 emit_insn (s390_load_got ());
4999 if (!s390_tls_symbol)
5000 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
5002 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
5003 gen_rtx_REG (Pmode, RETURN_REGNUM));
5005 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
5006 RTL_CONST_CALL_P (insn) = 1;
5009 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5010 this (thread-local) address. REG may be used as temporary. */
5012 static rtx
5013 legitimize_tls_address (rtx addr, rtx reg)
5015 rtx new_rtx, tls_call, temp, base, r2;
5016 rtx_insn *insn;
5018 if (GET_CODE (addr) == SYMBOL_REF)
5019 switch (tls_symbolic_operand (addr))
5021 case TLS_MODEL_GLOBAL_DYNAMIC:
5022 start_sequence ();
5023 r2 = gen_rtx_REG (Pmode, 2);
5024 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
5025 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5026 new_rtx = force_const_mem (Pmode, new_rtx);
5027 emit_move_insn (r2, new_rtx);
5028 s390_emit_tls_call_insn (r2, tls_call);
5029 insn = get_insns ();
5030 end_sequence ();
5032 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5033 temp = gen_reg_rtx (Pmode);
5034 emit_libcall_block (insn, temp, r2, new_rtx);
5036 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5037 if (reg != 0)
5039 s390_load_address (reg, new_rtx);
5040 new_rtx = reg;
5042 break;
5044 case TLS_MODEL_LOCAL_DYNAMIC:
5045 start_sequence ();
5046 r2 = gen_rtx_REG (Pmode, 2);
5047 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
5048 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5049 new_rtx = force_const_mem (Pmode, new_rtx);
5050 emit_move_insn (r2, new_rtx);
5051 s390_emit_tls_call_insn (r2, tls_call);
5052 insn = get_insns ();
5053 end_sequence ();
5055 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
5056 temp = gen_reg_rtx (Pmode);
5057 emit_libcall_block (insn, temp, r2, new_rtx);
5059 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5060 base = gen_reg_rtx (Pmode);
5061 s390_load_address (base, new_rtx);
5063 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
5064 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5065 new_rtx = force_const_mem (Pmode, new_rtx);
5066 temp = gen_reg_rtx (Pmode);
5067 emit_move_insn (temp, new_rtx);
5069 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
5070 if (reg != 0)
5072 s390_load_address (reg, new_rtx);
5073 new_rtx = reg;
5075 break;
5077 case TLS_MODEL_INITIAL_EXEC:
5078 if (flag_pic == 1)
5080 /* Assume GOT offset < 4k. This is handled the same way
5081 in both 31- and 64-bit code. */
5083 if (reload_in_progress || reload_completed)
5084 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5086 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5087 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5088 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
5089 new_rtx = gen_const_mem (Pmode, new_rtx);
5090 temp = gen_reg_rtx (Pmode);
5091 emit_move_insn (temp, new_rtx);
5093 else if (TARGET_CPU_ZARCH)
5095 /* If the GOT offset might be >= 4k, we determine the position
5096 of the GOT entry via a PC-relative LARL. */
5098 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5099 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5100 temp = gen_reg_rtx (Pmode);
5101 emit_move_insn (temp, new_rtx);
5103 new_rtx = gen_const_mem (Pmode, temp);
5104 temp = gen_reg_rtx (Pmode);
5105 emit_move_insn (temp, new_rtx);
5107 else if (flag_pic)
5109 /* If the GOT offset might be >= 4k, we have to load it
5110 from the literal pool. */
5112 if (reload_in_progress || reload_completed)
5113 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5115 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5116 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5117 new_rtx = force_const_mem (Pmode, new_rtx);
5118 temp = gen_reg_rtx (Pmode);
5119 emit_move_insn (temp, new_rtx);
5121 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
5122 new_rtx = gen_const_mem (Pmode, new_rtx);
5124 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5125 temp = gen_reg_rtx (Pmode);
5126 emit_insn (gen_rtx_SET (temp, new_rtx));
5128 else
5130 /* In position-dependent code, load the absolute address of
5131 the GOT entry from the literal pool. */
5133 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5134 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5135 new_rtx = force_const_mem (Pmode, new_rtx);
5136 temp = gen_reg_rtx (Pmode);
5137 emit_move_insn (temp, new_rtx);
5139 new_rtx = temp;
5140 new_rtx = gen_const_mem (Pmode, new_rtx);
5141 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5142 temp = gen_reg_rtx (Pmode);
5143 emit_insn (gen_rtx_SET (temp, new_rtx));
5146 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5147 if (reg != 0)
5149 s390_load_address (reg, new_rtx);
5150 new_rtx = reg;
5152 break;
5154 case TLS_MODEL_LOCAL_EXEC:
5155 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5156 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5157 new_rtx = force_const_mem (Pmode, new_rtx);
5158 temp = gen_reg_rtx (Pmode);
5159 emit_move_insn (temp, new_rtx);
5161 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5162 if (reg != 0)
5164 s390_load_address (reg, new_rtx);
5165 new_rtx = reg;
5167 break;
5169 default:
5170 gcc_unreachable ();
5173 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5175 switch (XINT (XEXP (addr, 0), 1))
5177 case UNSPEC_INDNTPOFF:
5178 gcc_assert (TARGET_CPU_ZARCH);
5179 new_rtx = addr;
5180 break;
5182 default:
5183 gcc_unreachable ();
5187 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5188 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5190 new_rtx = XEXP (XEXP (addr, 0), 0);
5191 if (GET_CODE (new_rtx) != SYMBOL_REF)
5192 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5194 new_rtx = legitimize_tls_address (new_rtx, reg);
5195 new_rtx = plus_constant (Pmode, new_rtx,
5196 INTVAL (XEXP (XEXP (addr, 0), 1)));
5197 new_rtx = force_operand (new_rtx, 0);
5200 else
5201 gcc_unreachable (); /* for now ... */
5203 return new_rtx;
5206 /* Emit insns making the address in operands[1] valid for a standard
5207 move to operands[0]. operands[1] is replaced by an address which
5208 should be used instead of the former RTX to emit the move
5209 pattern. */
5211 void
5212 emit_symbolic_move (rtx *operands)
5214 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5216 if (GET_CODE (operands[0]) == MEM)
5217 operands[1] = force_reg (Pmode, operands[1]);
5218 else if (TLS_SYMBOLIC_CONST (operands[1]))
5219 operands[1] = legitimize_tls_address (operands[1], temp);
5220 else if (flag_pic)
5221 operands[1] = legitimize_pic_address (operands[1], temp);
5224 /* Try machine-dependent ways of modifying an illegitimate address X
5225 to be legitimate. If we find one, return the new, valid address.
5227 OLDX is the address as it was before break_out_memory_refs was called.
5228 In some cases it is useful to look at this to decide what needs to be done.
5230 MODE is the mode of the operand pointed to by X.
5232 When -fpic is used, special handling is needed for symbolic references.
5233 See comments by legitimize_pic_address for details. */
5235 static rtx
5236 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5237 machine_mode mode ATTRIBUTE_UNUSED)
5239 rtx constant_term = const0_rtx;
5241 if (TLS_SYMBOLIC_CONST (x))
5243 x = legitimize_tls_address (x, 0);
5245 if (s390_legitimate_address_p (mode, x, FALSE))
5246 return x;
5248 else if (GET_CODE (x) == PLUS
5249 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5250 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5252 return x;
5254 else if (flag_pic)
5256 if (SYMBOLIC_CONST (x)
5257 || (GET_CODE (x) == PLUS
5258 && (SYMBOLIC_CONST (XEXP (x, 0))
5259 || SYMBOLIC_CONST (XEXP (x, 1)))))
5260 x = legitimize_pic_address (x, 0);
5262 if (s390_legitimate_address_p (mode, x, FALSE))
5263 return x;
5266 x = eliminate_constant_term (x, &constant_term);
5268 /* Optimize loading of large displacements by splitting them
5269 into the multiple of 4K and the rest; this allows the
5270 former to be CSE'd if possible.
5272 Don't do this if the displacement is added to a register
5273 pointing into the stack frame, as the offsets will
5274 change later anyway. */
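/* E.g. a displacement of 0x12345 is split into 0x12000 (forced into a
   register and thus CSE-able) plus the in-range displacement 0x345. */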
5276 if (GET_CODE (constant_term) == CONST_INT
5277 && !TARGET_LONG_DISPLACEMENT
5278 && !DISP_IN_RANGE (INTVAL (constant_term))
5279 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5281 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5282 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5284 rtx temp = gen_reg_rtx (Pmode);
5285 rtx val = force_operand (GEN_INT (upper), temp);
5286 if (val != temp)
5287 emit_move_insn (temp, val);
5289 x = gen_rtx_PLUS (Pmode, x, temp);
5290 constant_term = GEN_INT (lower);
5293 if (GET_CODE (x) == PLUS)
5295 if (GET_CODE (XEXP (x, 0)) == REG)
5297 rtx temp = gen_reg_rtx (Pmode);
5298 rtx val = force_operand (XEXP (x, 1), temp);
5299 if (val != temp)
5300 emit_move_insn (temp, val);
5302 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5305 else if (GET_CODE (XEXP (x, 1)) == REG)
5307 rtx temp = gen_reg_rtx (Pmode);
5308 rtx val = force_operand (XEXP (x, 0), temp);
5309 if (val != temp)
5310 emit_move_insn (temp, val);
5312 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5316 if (constant_term != const0_rtx)
5317 x = gen_rtx_PLUS (Pmode, x, constant_term);
5319 return x;
5322 /* Try a machine-dependent way of reloading an illegitimate address AD
5323 operand. If we find one, push the reload and return the new address.
5325 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5326 and TYPE is the reload type of the current reload. */
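/* E.g. (plus base 0x12345) is rewritten as (plus (plus base 0x12000) 0x345);
   the out-of-range part 0x12000 is pushed as a reload into a base
   register, leaving an in-range displacement. */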
5328 rtx
5329 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5330 int opnum, int type)
5332 if (!optimize || TARGET_LONG_DISPLACEMENT)
5333 return NULL_RTX;
5335 if (GET_CODE (ad) == PLUS)
5337 rtx tem = simplify_binary_operation (PLUS, Pmode,
5338 XEXP (ad, 0), XEXP (ad, 1));
5339 if (tem)
5340 ad = tem;
5343 if (GET_CODE (ad) == PLUS
5344 && GET_CODE (XEXP (ad, 0)) == REG
5345 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5346 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5348 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5349 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5350 rtx cst, tem, new_rtx;
5352 cst = GEN_INT (upper);
5353 if (!legitimate_reload_constant_p (cst))
5354 cst = force_const_mem (Pmode, cst);
5356 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5357 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5359 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5360 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5361 opnum, (enum reload_type) type);
5362 return new_rtx;
5365 return NULL_RTX;
5368 /* Emit code to move LEN bytes from SRC to DST. */
5370 bool
5371 s390_expand_movmem (rtx dst, rtx src, rtx len)
5373 /* When tuning for z10 or higher we rely on the Glibc functions to
5374 do the right thing. Only for constant lengths below 64k do we
5375 generate inline code. */
5376 if (s390_tune >= PROCESSOR_2097_Z10
5377 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5378 return false;
5380 /* Expand memcpy for constant length operands without a loop if it
5381 is shorter that way.
5383 With a constant length argument a
5384 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
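/* E.g. a 700 byte copy is then emitted as three mvc's covering 256,
   256 and 188 bytes. */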
5385 if (GET_CODE (len) == CONST_INT
5386 && INTVAL (len) >= 0
5387 && INTVAL (len) <= 256 * 6
5388 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5390 HOST_WIDE_INT o, l;
5392 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5394 rtx newdst = adjust_address (dst, BLKmode, o);
5395 rtx newsrc = adjust_address (src, BLKmode, o);
5396 emit_insn (gen_movmem_short (newdst, newsrc,
5397 GEN_INT (l > 256 ? 255 : l - 1)));
5401 else if (TARGET_MVCLE)
5403 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5406 else
5408 rtx dst_addr, src_addr, count, blocks, temp;
5409 rtx_code_label *loop_start_label = gen_label_rtx ();
5410 rtx_code_label *loop_end_label = gen_label_rtx ();
5411 rtx_code_label *end_label = gen_label_rtx ();
5412 machine_mode mode;
5414 mode = GET_MODE (len);
5415 if (mode == VOIDmode)
5416 mode = Pmode;
5418 dst_addr = gen_reg_rtx (Pmode);
5419 src_addr = gen_reg_rtx (Pmode);
5420 count = gen_reg_rtx (mode);
5421 blocks = gen_reg_rtx (mode);
5423 convert_move (count, len, 1);
5424 emit_cmp_and_jump_insns (count, const0_rtx,
5425 EQ, NULL_RTX, mode, 1, end_label);
5427 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5428 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5429 dst = change_address (dst, VOIDmode, dst_addr);
5430 src = change_address (src, VOIDmode, src_addr);
5432 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5433 OPTAB_DIRECT);
5434 if (temp != count)
5435 emit_move_insn (count, temp);
5437 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5438 OPTAB_DIRECT);
5439 if (temp != blocks)
5440 emit_move_insn (blocks, temp);
5442 emit_cmp_and_jump_insns (blocks, const0_rtx,
5443 EQ, NULL_RTX, mode, 1, loop_end_label);
5445 emit_label (loop_start_label);
5447 if (TARGET_Z10
5448 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5450 rtx prefetch;
5452 /* Issue a read prefetch for the +3 cache line. */
5453 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5454 const0_rtx, const0_rtx);
5455 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5456 emit_insn (prefetch);
5458 /* Issue a write prefetch for the +3 cache line. */
5459 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5460 const1_rtx, const0_rtx);
5461 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5462 emit_insn (prefetch);
5465 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5466 s390_load_address (dst_addr,
5467 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5468 s390_load_address (src_addr,
5469 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5471 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5472 OPTAB_DIRECT);
5473 if (temp != blocks)
5474 emit_move_insn (blocks, temp);
5476 emit_cmp_and_jump_insns (blocks, const0_rtx,
5477 EQ, NULL_RTX, mode, 1, loop_end_label);
5479 emit_jump (loop_start_label);
5480 emit_label (loop_end_label);
5482 emit_insn (gen_movmem_short (dst, src,
5483 convert_to_mode (Pmode, count, 1)));
5484 emit_label (end_label);
5486 return true;
5489 /* Emit code to set LEN bytes at DST to VAL.
5490 Make use of clrmem if VAL is zero. */
5492 void
5493 s390_expand_setmem (rtx dst, rtx len, rtx val)
5495 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5496 return;
5498 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5500 /* Expand setmem/clrmem for a constant length operand without a
5501 loop if it will be shorter that way.
5502 With a constant length and without pfd argument a
5503 clrmem loop is 32 bytes -> 5.3 * xc
5504 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5505 if (GET_CODE (len) == CONST_INT
5506 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5507 || INTVAL (len) <= 257 * 3)
5508 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5510 HOST_WIDE_INT o, l;
5512 if (val == const0_rtx)
5513 /* clrmem: emit 256 byte blockwise XCs. */
5514 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5516 rtx newdst = adjust_address (dst, BLKmode, o);
5517 emit_insn (gen_clrmem_short (newdst,
5518 GEN_INT (l > 256 ? 255 : l - 1)));
5520 else
5521 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5522 setting first byte to val and using a 256 byte mvc with one
5523 byte overlap to propagate the byte. */
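/* E.g. a 10 byte memset roughly becomes:
     mvi  0(dst),val
     mvc  1(9,dst),0(dst)  */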
5524 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5526 rtx newdst = adjust_address (dst, BLKmode, o);
5527 emit_move_insn (adjust_address (dst, QImode, o), val);
5528 if (l > 1)
5530 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5531 emit_insn (gen_movmem_short (newdstp1, newdst,
5532 GEN_INT (l > 257 ? 255 : l - 2)));
5537 else if (TARGET_MVCLE)
5539 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5540 if (TARGET_64BIT)
5541 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5542 val));
5543 else
5544 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5545 val));
5548 else
5550 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5551 rtx_code_label *loop_start_label = gen_label_rtx ();
5552 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5553 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5554 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5555 machine_mode mode;
5557 mode = GET_MODE (len);
5558 if (mode == VOIDmode)
5559 mode = Pmode;
5561 dst_addr = gen_reg_rtx (Pmode);
5562 count = gen_reg_rtx (mode);
5563 blocks = gen_reg_rtx (mode);
5565 convert_move (count, len, 1);
5566 emit_cmp_and_jump_insns (count, const0_rtx,
5567 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5568 profile_probability::very_unlikely ());
5570 /* We need to make a copy of the target address since memset is
5571 supposed to return it unmodified. We have to make it here
5572 already since the new reg is used at onebyte_end_label. */
5573 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5574 dst = change_address (dst, VOIDmode, dst_addr);
5576 if (val != const0_rtx)
5578 /* When using the overlapping mvc the original target
5579 address is only accessed as a single byte entity (even by
5580 the mvc reading this value). */
5581 set_mem_size (dst, 1);
5582 dstp1 = adjust_address (dst, VOIDmode, 1);
5583 emit_cmp_and_jump_insns (count,
5584 const1_rtx, EQ, NULL_RTX, mode, 1,
5585 onebyte_end_label,
5586 profile_probability::very_unlikely ());
5589 /* There is one unconditional (mvi+mvc)/xc after the loop
5590 dealing with the rest of the bytes; subtracting two (mvi+mvc)
5591 or one (xc) here leaves this number of bytes to be handled by
5592 it. */
5593 temp = expand_binop (mode, add_optab, count,
5594 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5595 count, 1, OPTAB_DIRECT);
5596 if (temp != count)
5597 emit_move_insn (count, temp);
5599 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5600 OPTAB_DIRECT);
5601 if (temp != blocks)
5602 emit_move_insn (blocks, temp);
5604 emit_cmp_and_jump_insns (blocks, const0_rtx,
5605 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5607 emit_jump (loop_start_label);
5609 if (val != const0_rtx)
5611 /* The 1 byte != 0 special case. Not handled efficiently
5612 since we require two jumps for that. However, this
5613 should be very rare. */
5614 emit_label (onebyte_end_label);
5615 emit_move_insn (adjust_address (dst, QImode, 0), val);
5616 emit_jump (zerobyte_end_label);
5619 emit_label (loop_start_label);
5621 if (TARGET_Z10
5622 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5624 /* Issue a write prefetch for the +4 cache line. */
5625 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5626 GEN_INT (1024)),
5627 const1_rtx, const0_rtx);
5628 emit_insn (prefetch);
5629 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5632 if (val == const0_rtx)
5633 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5634 else
5636 /* Set the first byte in the block to the value and use an
5637 overlapping mvc for the block. */
5638 emit_move_insn (adjust_address (dst, QImode, 0), val);
5639 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5641 s390_load_address (dst_addr,
5642 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5644 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5645 OPTAB_DIRECT);
5646 if (temp != blocks)
5647 emit_move_insn (blocks, temp);
5649 emit_cmp_and_jump_insns (blocks, const0_rtx,
5650 NE, NULL_RTX, mode, 1, loop_start_label);
5652 emit_label (restbyte_end_label);
5654 if (val == const0_rtx)
5655 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5656 else
5658 /* Set the first byte in the block to the value and use an
5659 overlapping mvc for the block. */
5660 emit_move_insn (adjust_address (dst, QImode, 0), val);
5661 /* The execute instruction only uses the lowest 8 bits of count,
5662 which is exactly what we need here. */
5663 emit_insn (gen_movmem_short (dstp1, dst,
5664 convert_to_mode (Pmode, count, 1)));
5667 emit_label (zerobyte_end_label);
5671 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5672 and return the result in TARGET. */
5674 bool
5675 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5677 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5678 rtx tmp;
5680 /* When tuning for z10 or higher we rely on the Glibc functions to
5681 do the right thing. Only for constant lengths below 64k do we
5682 generate inline code. */
5683 if (s390_tune >= PROCESSOR_2097_Z10
5684 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5685 return false;
5687 /* As the result of CMPINT is inverted compared to what we need,
5688 we have to swap the operands. */
5689 tmp = op0; op0 = op1; op1 = tmp;
5691 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5693 if (INTVAL (len) > 0)
5695 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5696 emit_insn (gen_cmpint (target, ccreg));
5698 else
5699 emit_move_insn (target, const0_rtx);
5701 else if (TARGET_MVCLE)
5703 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5704 emit_insn (gen_cmpint (target, ccreg));
5706 else
5708 rtx addr0, addr1, count, blocks, temp;
5709 rtx_code_label *loop_start_label = gen_label_rtx ();
5710 rtx_code_label *loop_end_label = gen_label_rtx ();
5711 rtx_code_label *end_label = gen_label_rtx ();
5712 machine_mode mode;
5714 mode = GET_MODE (len);
5715 if (mode == VOIDmode)
5716 mode = Pmode;
5718 addr0 = gen_reg_rtx (Pmode);
5719 addr1 = gen_reg_rtx (Pmode);
5720 count = gen_reg_rtx (mode);
5721 blocks = gen_reg_rtx (mode);
5723 convert_move (count, len, 1);
5724 emit_cmp_and_jump_insns (count, const0_rtx,
5725 EQ, NULL_RTX, mode, 1, end_label);
5727 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5728 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5729 op0 = change_address (op0, VOIDmode, addr0);
5730 op1 = change_address (op1, VOIDmode, addr1);
5732 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5733 OPTAB_DIRECT);
5734 if (temp != count)
5735 emit_move_insn (count, temp);
5737 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5738 OPTAB_DIRECT);
5739 if (temp != blocks)
5740 emit_move_insn (blocks, temp);
5742 emit_cmp_and_jump_insns (blocks, const0_rtx,
5743 EQ, NULL_RTX, mode, 1, loop_end_label);
5745 emit_label (loop_start_label);
5747 if (TARGET_Z10
5748 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5750 rtx prefetch;
5752 /* Issue a read prefetch for the +2 cache line of operand 1. */
5753 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5754 const0_rtx, const0_rtx);
5755 emit_insn (prefetch);
5756 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5758 /* Issue a read prefetch for the +2 cache line of operand 2. */
5759 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5760 const0_rtx, const0_rtx);
5761 emit_insn (prefetch);
5762 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
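/* Compare the next 256 byte block. If the blocks differ (CC != 0) we
   leave the loop right away; the cmpint at end_label turns the CC into
   the final result. */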
5765 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5766 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5767 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5768 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5769 temp = gen_rtx_SET (pc_rtx, temp);
5770 emit_jump_insn (temp);
5772 s390_load_address (addr0,
5773 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5774 s390_load_address (addr1,
5775 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5777 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5778 OPTAB_DIRECT);
5779 if (temp != blocks)
5780 emit_move_insn (blocks, temp);
5782 emit_cmp_and_jump_insns (blocks, const0_rtx,
5783 EQ, NULL_RTX, mode, 1, loop_end_label);
5785 emit_jump (loop_start_label);
5786 emit_label (loop_end_label);
5788 emit_insn (gen_cmpmem_short (op0, op1,
5789 convert_to_mode (Pmode, count, 1)));
5790 emit_label (end_label);
5792 emit_insn (gen_cmpint (target, ccreg));
5794 return true;
5797 /* Emit a conditional jump to LABEL for condition code mask MASK using
5798 comparison operator COMPARISON. Return the emitted jump insn. */
5800 static rtx_insn *
5801 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5803 rtx temp;
5805 gcc_assert (comparison == EQ || comparison == NE);
5806 gcc_assert (mask > 0 && mask < 15);
5808 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5809 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5810 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5811 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5812 temp = gen_rtx_SET (pc_rtx, temp);
5813 return emit_jump_insn (temp);
5816 /* Emit the instructions to implement strlen of STRING and store the
5817 result in TARGET. The string has the known ALIGNMENT. This
5818 version uses vector instructions and is therefore not appropriate
5819 for targets prior to z13. */
5821 void
5822 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5824 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5825 rtx str_reg = gen_reg_rtx (V16QImode);
5826 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5827 rtx str_idx_reg = gen_reg_rtx (Pmode);
5828 rtx result_reg = gen_reg_rtx (V16QImode);
5829 rtx is_aligned_label = gen_label_rtx ();
5830 rtx into_loop_label = NULL_RTX;
5831 rtx loop_start_label = gen_label_rtx ();
5832 rtx temp;
5833 rtx len = gen_reg_rtx (QImode);
5834 rtx cond;
5836 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5837 emit_move_insn (str_idx_reg, const0_rtx);
5839 if (INTVAL (alignment) < 16)
5841 /* Check whether the address happens to be aligned properly; if so,
5842 jump directly to the aligned loop. */
5843 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5844 str_addr_base_reg, GEN_INT (15)),
5845 const0_rtx, EQ, NULL_RTX,
5846 Pmode, 1, is_aligned_label);
5848 temp = gen_reg_rtx (Pmode);
5849 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5850 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5851 gcc_assert (REG_P (temp));
5852 highest_index_to_load_reg =
5853 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5854 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5855 gcc_assert (REG_P (highest_index_to_load_reg));
5856 emit_insn (gen_vllv16qi (str_reg,
5857 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5858 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5860 into_loop_label = gen_label_rtx ();
5861 s390_emit_jump (into_loop_label, NULL_RTX);
5862 emit_barrier ();
5865 emit_label (is_aligned_label);
5866 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5868 /* From this point on we only perform 16-byte aligned
5869 loads. */
5870 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5872 emit_label (loop_start_label);
5873 LABEL_NUSES (loop_start_label) = 1;
5875 /* Load 16 bytes of the string into VR. */
5876 emit_move_insn (str_reg,
5877 gen_rtx_MEM (V16QImode,
5878 gen_rtx_PLUS (Pmode, str_idx_reg,
5879 str_addr_base_reg)));
5880 if (into_loop_label != NULL_RTX)
5882 emit_label (into_loop_label);
5883 LABEL_NUSES (into_loop_label) = 1;
5886 /* Increment string index by 16 bytes. */
5887 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5888 str_idx_reg, 1, OPTAB_DIRECT);
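/* Search the 16 loaded bytes for a zero byte (vfenez); the ccraw
   branch below re-enters the loop as long as no zero byte has been
   found. */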
5890 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5891 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5893 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5894 REG_BR_PROB,
5895 profile_probability::very_likely ().to_reg_br_prob_note ());
5896 emit_insn (gen_vec_extractv16qiqi (len, result_reg, GEN_INT (7)));
5898 /* If the string pointer wasn't aligned we have loaded less than 16
5899 bytes and the remaining bytes got filled with zeros (by vll).
5900 Now we have to check whether the resulting index lies within the
5901 bytes actually part of the string. */
5903 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5904 highest_index_to_load_reg);
5905 s390_load_address (highest_index_to_load_reg,
5906 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5907 const1_rtx));
5908 if (TARGET_64BIT)
5909 emit_insn (gen_movdicc (str_idx_reg, cond,
5910 highest_index_to_load_reg, str_idx_reg));
5911 else
5912 emit_insn (gen_movsicc (str_idx_reg, cond,
5913 highest_index_to_load_reg, str_idx_reg));
5915 add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
5916 profile_probability::very_unlikely ());
5918 expand_binop (Pmode, add_optab, str_idx_reg,
5919 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5920 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5921 here. */
5922 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5923 convert_to_mode (Pmode, len, 1),
5924 target, 1, OPTAB_DIRECT);
5925 if (temp != target)
5926 emit_move_insn (target, temp);
5929 void
5930 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5932 rtx temp = gen_reg_rtx (Pmode);
5933 rtx src_addr = XEXP (src, 0);
5934 rtx dst_addr = XEXP (dst, 0);
5935 rtx src_addr_reg = gen_reg_rtx (Pmode);
5936 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5937 rtx offset = gen_reg_rtx (Pmode);
5938 rtx vsrc = gen_reg_rtx (V16QImode);
5939 rtx vpos = gen_reg_rtx (V16QImode);
5940 rtx loadlen = gen_reg_rtx (SImode);
5941 rtx gpos_qi = gen_reg_rtx (QImode);
5942 rtx gpos = gen_reg_rtx (SImode);
5943 rtx done_label = gen_label_rtx ();
5944 rtx loop_label = gen_label_rtx ();
5945 rtx exit_label = gen_label_rtx ();
5946 rtx full_label = gen_label_rtx ();
5948 /* Perform a quick check for a string terminator within the first (at most)
5949 16 bytes and exit early if one is found. */
5951 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5952 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5953 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5954 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
5955 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5956 /* gpos is the byte index if a zero was found and 16 otherwise.
5957 So if it is lower than the number of loaded bytes we have a hit. */
5958 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5959 full_label);
5960 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5962 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5963 1, OPTAB_DIRECT);
5964 emit_jump (exit_label);
5965 emit_barrier ();
5967 emit_label (full_label);
5968 LABEL_NUSES (full_label) = 1;
5970 /* Calculate `offset' so that src + offset points to the last byte
5971 before the next 16 byte alignment boundary. */
5973 /* temp = src_addr & 0xf */
5974 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5975 1, OPTAB_DIRECT);
5977 /* offset = 0xf - temp */
5978 emit_move_insn (offset, GEN_INT (15));
5979 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5980 1, OPTAB_DIRECT);
5982 /* Store `offset' bytes in the destination string. The quick check
5983 has loaded at least `offset' bytes into vsrc. */
5985 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5987 /* Advance to the next byte to be loaded. */
5988 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5989 1, OPTAB_DIRECT);
5991 /* Make sure the addresses are single regs which can be used as a
5992 base. */
5993 emit_move_insn (src_addr_reg, src_addr);
5994 emit_move_insn (dst_addr_reg, dst_addr);
5996 /* MAIN LOOP */
5998 emit_label (loop_label);
5999 LABEL_NUSES (loop_label) = 1;
6001 emit_move_insn (vsrc,
6002 gen_rtx_MEM (V16QImode,
6003 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
6005 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
6006 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
6007 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
6008 REG_BR_PROB, profile_probability::very_unlikely ()
6009 .to_reg_br_prob_note ());
6011 emit_move_insn (gen_rtx_MEM (V16QImode,
6012 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
6013 vsrc);
6014 /* offset += 16 */
6015 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
6016 offset, 1, OPTAB_DIRECT);
6018 emit_jump (loop_label);
6019 emit_barrier ();
6021 /* REGULAR EXIT */
6023 /* We are done. Add the offset of the zero character to the dst_addr
6024 pointer to get the result. */
6026 emit_label (done_label);
6027 LABEL_NUSES (done_label) = 1;
6029 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
6030 1, OPTAB_DIRECT);
6032 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
6033 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
6035 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
6037 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
6038 1, OPTAB_DIRECT);
6040 /* EARLY EXIT */
6042 emit_label (exit_label);
6043 LABEL_NUSES (exit_label) = 1;
6047 /* Expand conditional increment or decrement using alc/slb instructions.
6048 Should generate code setting DST to either SRC or SRC + INCREMENT,
6049 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
6050 Returns true if successful, false otherwise.
6052 That makes it possible to implement some if-constructs without jumps e.g.:
6053 (borrow = CC0 | CC1 and carry = CC2 | CC3)
6054 unsigned int a, b, c;
6055 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
6056 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
6057 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
6058 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
6060 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
6061 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
6062 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
6063 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
6064 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
6066 bool
6067 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
6068 rtx dst, rtx src, rtx increment)
6070 machine_mode cmp_mode;
6071 machine_mode cc_mode;
6072 rtx op_res;
6073 rtx insn;
6074 rtvec p;
6075 int ret;
6077 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
6078 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
6079 cmp_mode = SImode;
6080 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
6081 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
6082 cmp_mode = DImode;
6083 else
6084 return false;
6086 /* Try ADD LOGICAL WITH CARRY. */
6087 if (increment == const1_rtx)
6089 /* Determine CC mode to use. */
6090 if (cmp_code == EQ || cmp_code == NE)
6092 if (cmp_op1 != const0_rtx)
6094 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6095 NULL_RTX, 0, OPTAB_WIDEN);
6096 cmp_op1 = const0_rtx;
6099 cmp_code = cmp_code == EQ ? LEU : GTU;
6102 if (cmp_code == LTU || cmp_code == LEU)
6104 rtx tem = cmp_op0;
6105 cmp_op0 = cmp_op1;
6106 cmp_op1 = tem;
6107 cmp_code = swap_condition (cmp_code);
6110 switch (cmp_code)
6112 case GTU:
6113 cc_mode = CCUmode;
6114 break;
6116 case GEU:
6117 cc_mode = CCL3mode;
6118 break;
6120 default:
6121 return false;
6124 /* Emit comparison instruction pattern. */
6125 if (!register_operand (cmp_op0, cmp_mode))
6126 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6128 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6129 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6130 /* We use insn_invalid_p here to add clobbers if required. */
6131 ret = insn_invalid_p (emit_insn (insn), false);
6132 gcc_assert (!ret);
6134 /* Emit ALC instruction pattern. */
6135 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6136 gen_rtx_REG (cc_mode, CC_REGNUM),
6137 const0_rtx);
6139 if (src != const0_rtx)
6141 if (!register_operand (src, GET_MODE (dst)))
6142 src = force_reg (GET_MODE (dst), src);
6144 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6145 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6148 p = rtvec_alloc (2);
6149 RTVEC_ELT (p, 0) =
6150 gen_rtx_SET (dst, op_res);
6151 RTVEC_ELT (p, 1) =
6152 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6153 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6155 return true;
6158 /* Try SUBTRACT LOGICAL WITH BORROW. */
6159 if (increment == constm1_rtx)
6161 /* Determine CC mode to use. */
6162 if (cmp_code == EQ || cmp_code == NE)
6164 if (cmp_op1 != const0_rtx)
6166 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6167 NULL_RTX, 0, OPTAB_WIDEN);
6168 cmp_op1 = const0_rtx;
6171 cmp_code = cmp_code == EQ ? LEU : GTU;
6174 if (cmp_code == GTU || cmp_code == GEU)
6176 rtx tem = cmp_op0;
6177 cmp_op0 = cmp_op1;
6178 cmp_op1 = tem;
6179 cmp_code = swap_condition (cmp_code);
6182 switch (cmp_code)
6184 case LEU:
6185 cc_mode = CCUmode;
6186 break;
6188 case LTU:
6189 cc_mode = CCL3mode;
6190 break;
6192 default:
6193 return false;
6196 /* Emit comparison instruction pattern. */
6197 if (!register_operand (cmp_op0, cmp_mode))
6198 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6200 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6201 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6202 /* We use insn_invalid_p here to add clobbers if required. */
6203 ret = insn_invalid_p (emit_insn (insn), false);
6204 gcc_assert (!ret);
6206 /* Emit SLB instruction pattern. */
6207 if (!register_operand (src, GET_MODE (dst)))
6208 src = force_reg (GET_MODE (dst), src);
6210 op_res = gen_rtx_MINUS (GET_MODE (dst),
6211 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6212 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6213 gen_rtx_REG (cc_mode, CC_REGNUM),
6214 const0_rtx));
6215 p = rtvec_alloc (2);
6216 RTVEC_ELT (p, 0) =
6217 gen_rtx_SET (dst, op_res);
6218 RTVEC_ELT (p, 1) =
6219 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6220 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6222 return true;
6225 return false;
6228 /* Expand code for the insv template. Return true if successful. */
6230 bool
6231 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6233 int bitsize = INTVAL (op1);
6234 int bitpos = INTVAL (op2);
6235 machine_mode mode = GET_MODE (dest);
6236 machine_mode smode;
6237 int smode_bsize, mode_bsize;
6238 rtx op, clobber;
6240 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6241 return false;
6243 /* Generate INSERT IMMEDIATE (IILL et al). */
6244 /* (set (ze (reg)) (const_int)). */
6245 if (TARGET_ZARCH
6246 && register_operand (dest, word_mode)
6247 && (bitpos % 16) == 0
6248 && (bitsize % 16) == 0
6249 && const_int_operand (src, VOIDmode))
6251 HOST_WIDE_INT val = INTVAL (src);
6252 int regpos = bitpos + bitsize;
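/* The constant is consumed 16 bits at a time with INSERT IMMEDIATE, or 32
   bits at a time when the extended-immediate facility is available and the
   chunk is suitably aligned; VAL is consumed starting from its least
   significant bits while REGPOS tracks how much of the field is left.  */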
6254 while (regpos > bitpos)
6256 machine_mode putmode;
6257 int putsize;
6259 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6260 putmode = SImode;
6261 else
6262 putmode = HImode;
6264 putsize = GET_MODE_BITSIZE (putmode);
6265 regpos -= putsize;
6266 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6267 GEN_INT (putsize),
6268 GEN_INT (regpos)),
6269 gen_int_mode (val, putmode));
6270 val >>= putsize;
6272 gcc_assert (regpos == bitpos);
6273 return true;
6276 smode = smallest_int_mode_for_size (bitsize);
6277 smode_bsize = GET_MODE_BITSIZE (smode);
6278 mode_bsize = GET_MODE_BITSIZE (mode);
6280 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6281 if (bitpos == 0
6282 && (bitsize % BITS_PER_UNIT) == 0
6283 && MEM_P (dest)
6284 && (register_operand (src, word_mode)
6285 || const_int_operand (src, VOIDmode)))
6287 /* Emit standard pattern if possible. */
6288 if (smode_bsize == bitsize)
6290 emit_move_insn (adjust_address (dest, smode, 0),
6291 gen_lowpart (smode, src));
6292 return true;
6295 /* (set (ze (mem)) (const_int)). */
6296 else if (const_int_operand (src, VOIDmode))
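/* No single store matches the field size; put the constant into the
   literal pool as a full word and block-copy its low-order SIZE bytes
   (the last SIZE bytes on this big-endian target) to the destination.  */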
6298 int size = bitsize / BITS_PER_UNIT;
6299 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6300 BLKmode,
6301 UNITS_PER_WORD - size);
6303 dest = adjust_address (dest, BLKmode, 0);
6304 set_mem_size (dest, size);
6305 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6306 return true;
6309 /* (set (ze (mem)) (reg)). */
6310 else if (register_operand (src, word_mode))
6312 if (bitsize <= 32)
6313 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6314 const0_rtx), src);
6315 else
6317 /* Emit st,stcmh sequence. */
6318 int stcmh_width = bitsize - 32;
6319 int size = stcmh_width / BITS_PER_UNIT;
6321 emit_move_insn (adjust_address (dest, SImode, size),
6322 gen_lowpart (SImode, src));
6323 set_mem_size (dest, size);
6324 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6325 GEN_INT (stcmh_width),
6326 const0_rtx),
6327 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6329 return true;
6333 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6334 if ((bitpos % BITS_PER_UNIT) == 0
6335 && (bitsize % BITS_PER_UNIT) == 0
6336 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6337 && MEM_P (src)
6338 && (mode == DImode || mode == SImode)
6339 && register_operand (dest, mode))
6341 /* Emit a strict_low_part pattern if possible. */
6342 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6344 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6345 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6346 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6347 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6348 return true;
6351 /* ??? There are more powerful versions of ICM that are not
6352 completely represented in the md file. */
6355 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6356 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6358 machine_mode mode_s = GET_MODE (src);
6360 if (CONSTANT_P (src))
6362 /* For constant zero values the representation with AND
6363 appears to be folded in more situations than the (set
6364 (zero_extract) ...).
6365 We only do this when the start and end of the bitfield
6366 remain in the same SImode chunk. That way nihf or nilf
6367 can be used.
6368 The AND patterns might still generate a risbg for this. */
6369 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6370 return false;
6371 else
6372 src = force_reg (mode, src);
6374 else if (mode_s != mode)
6376 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6377 src = force_reg (mode_s, src);
6378 src = gen_lowpart (mode, src);
6381 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6382 op = gen_rtx_SET (op, src);
6384 if (!TARGET_ZEC12)
6386 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6387 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6389 emit_insn (op);
6391 return true;
6394 return false;
6397 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6398 register that holds VAL of mode MODE shifted by COUNT bits. */
6400 static inline rtx
6401 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6403 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6404 NULL_RTX, 1, OPTAB_DIRECT);
6405 return expand_simple_binop (SImode, ASHIFT, val, count,
6406 NULL_RTX, 1, OPTAB_DIRECT);
6409 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6410 the result in TARGET. */
6412 void
6413 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6414 rtx cmp_op1, rtx cmp_op2)
6416 machine_mode mode = GET_MODE (target);
6417 bool neg_p = false, swap_p = false;
6418 rtx tmp;
6420 if (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_VECTOR_FLOAT)
6422 switch (cond)
6424 /* NE a != b -> !(a == b) */
6425 case NE: cond = EQ; neg_p = true; break;
6426 /* UNGT a u> b -> !(b >= a) */
6427 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6428 /* UNGE a u>= b -> !(b > a) */
6429 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6430 /* LE: a <= b -> b >= a */
6431 case LE: cond = GE; swap_p = true; break;
6432 /* UNLE: a u<= b -> !(a > b) */
6433 case UNLE: cond = GT; neg_p = true; break;
6434 /* LT: a < b -> b > a */
6435 case LT: cond = GT; swap_p = true; break;
6436 /* UNLT: a u< b -> !(a >= b) */
6437 case UNLT: cond = GE; neg_p = true; break;
6438 case UNEQ:
6439 emit_insn (gen_vec_cmpuneq (target, cmp_op1, cmp_op2));
6440 return;
6441 case LTGT:
6442 emit_insn (gen_vec_cmpltgt (target, cmp_op1, cmp_op2));
6443 return;
6444 case ORDERED:
6445 emit_insn (gen_vec_ordered (target, cmp_op1, cmp_op2));
6446 return;
6447 case UNORDERED:
6448 emit_insn (gen_vec_unordered (target, cmp_op1, cmp_op2));
6449 return;
6450 default: break;
6453 else
6455 switch (cond)
6457 /* NE: a != b -> !(a == b) */
6458 case NE: cond = EQ; neg_p = true; break;
6459 /* GE: a >= b -> !(b > a) */
6460 case GE: cond = GT; neg_p = true; swap_p = true; break;
6461 /* GEU: a >= b -> !(b > a) */
6462 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6463 /* LE: a <= b -> !(a > b) */
6464 case LE: cond = GT; neg_p = true; break;
6465 /* LEU: a <= b -> !(a > b) */
6466 case LEU: cond = GTU; neg_p = true; break;
6467 /* LT: a < b -> b > a */
6468 case LT: cond = GT; swap_p = true; break;
6469 /* LTU: a < b -> b > a */
6470 case LTU: cond = GTU; swap_p = true; break;
6471 default: break;
6475 if (swap_p)
6477 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6480 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6481 mode,
6482 cmp_op1, cmp_op2)));
6483 if (neg_p)
6484 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6487 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6488 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6489 elements in CMP1 and CMP2 fulfill the comparison.
6490 This function is only used to emit patterns for the vx builtins and
6491 therefore only handles comparison codes required by the
6492 builtins. */
6493 void
6494 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6495 rtx cmp1, rtx cmp2, bool all_p)
6497 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6498 rtx tmp_reg = gen_reg_rtx (SImode);
6499 bool swap_p = false;
6501 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6503 switch (code)
6505 case EQ:
6506 case NE:
6507 cc_producer_mode = CCVEQmode;
6508 break;
6509 case GE:
6510 case LT:
6511 code = swap_condition (code);
6512 swap_p = true;
6513 /* fallthrough */
6514 case GT:
6515 case LE:
6516 cc_producer_mode = CCVIHmode;
6517 break;
6518 case GEU:
6519 case LTU:
6520 code = swap_condition (code);
6521 swap_p = true;
6522 /* fallthrough */
6523 case GTU:
6524 case LEU:
6525 cc_producer_mode = CCVIHUmode;
6526 break;
6527 default:
6528 gcc_unreachable ();
6531 scratch_mode = GET_MODE (cmp1);
6532 /* These codes represent inverted CC interpretations. Inverting
6533 an ALL CC mode results in an ANY CC mode and the other way
6534 around. Invert the all_p flag here to compensate for
6535 that. */
6536 if (code == NE || code == LE || code == LEU)
6537 all_p = !all_p;
6539 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6541 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6543 bool inv_p = false;
6545 switch (code)
6547 case EQ: cc_producer_mode = CCVEQmode; break;
6548 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6549 case GT: cc_producer_mode = CCVFHmode; break;
6550 case GE: cc_producer_mode = CCVFHEmode; break;
6551 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6552 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6553 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6554 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6555 default: gcc_unreachable ();
6557 scratch_mode = mode_for_int_vector (GET_MODE (cmp1)).require ();
6559 if (inv_p)
6560 all_p = !all_p;
6562 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6564 else
6565 gcc_unreachable ();
6567 if (swap_p)
6569 rtx tmp = cmp2;
6570 cmp2 = cmp1;
6571 cmp1 = tmp;
6574 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6575 gen_rtvec (2, gen_rtx_SET (
6576 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6577 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6578 gen_rtx_CLOBBER (VOIDmode,
6579 gen_rtx_SCRATCH (scratch_mode)))));
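/* Turn the condition code into a boolean result: TARGET is preset to 0 and
   then conditionally replaced by 1 (typically via a load-on-condition) when
   the CC matches the requested ALL/ANY interpretation.  */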
6580 emit_move_insn (target, const0_rtx);
6581 emit_move_insn (tmp_reg, const1_rtx);
6583 emit_move_insn (target,
6584 gen_rtx_IF_THEN_ELSE (SImode,
6585 gen_rtx_fmt_ee (code, VOIDmode,
6586 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6587 const0_rtx),
6588 tmp_reg, target));
6591 /* Invert the comparison CODE applied to a CC mode. This is only safe
6592 if we know whether the result was created by a floating point
6593 compare or not. For the CCV modes this is encoded as part of the
6594 mode. */
6595 enum rtx_code
6596 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6598 /* Reversal of FP compares needs care -- an ordered compare
6599 becomes an unordered compare and vice versa. */
6600 if (mode == CCVFALLmode || mode == CCVFANYmode)
6601 return reverse_condition_maybe_unordered (code);
6602 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6603 return reverse_condition (code);
6604 else
6605 gcc_unreachable ();
6608 /* Generate a vector comparison expression loading either elements of
6609 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6610 and CMP_OP2. */
6612 void
6613 s390_expand_vcond (rtx target, rtx then, rtx els,
6614 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6616 rtx tmp;
6617 machine_mode result_mode;
6618 rtx result_target;
6620 machine_mode target_mode = GET_MODE (target);
6621 machine_mode cmp_mode = GET_MODE (cmp_op1);
6622 rtx op = (cond == LT) ? els : then;
6624 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6625 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6626 for short and byte (x >> 15 and x >> 7 respectively). */
6627 if ((cond == LT || cond == GE)
6628 && target_mode == cmp_mode
6629 && cmp_op2 == CONST0_RTX (cmp_mode)
6630 && op == CONST0_RTX (target_mode)
6631 && s390_vector_mode_supported_p (target_mode)
6632 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6634 rtx negop = (cond == LT) ? then : els;
6636 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6638 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6639 if (negop == CONST1_RTX (target_mode))
6641 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6642 GEN_INT (shift), target,
6643 1, OPTAB_DIRECT);
6644 if (res != target)
6645 emit_move_insn (target, res);
6646 return;
6649 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6650 else if (all_ones_operand (negop, target_mode))
6652 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6653 GEN_INT (shift), target,
6654 0, OPTAB_DIRECT);
6655 if (res != target)
6656 emit_move_insn (target, res);
6657 return;
6661 /* We always use an integral type vector to hold the comparison
6662 result. */
6663 result_mode = mode_for_int_vector (cmp_mode).require ();
6664 result_target = gen_reg_rtx (result_mode);
6666 /* We allow vector immediates as comparison operands that
6667 can be handled by the optimization above but not by the
6668 following code. Hence, force them into registers here. */
6669 if (!REG_P (cmp_op1))
6670 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6672 if (!REG_P (cmp_op2))
6673 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6675 s390_expand_vec_compare (result_target, cond,
6676 cmp_op1, cmp_op2);
6678 /* If the results are supposed to be either -1 or 0 we are done
6679 since this is what our compare instructions generate anyway. */
6680 if (all_ones_operand (then, GET_MODE (then))
6681 && const0_operand (els, GET_MODE (els)))
6683 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6684 result_target, 0));
6685 return;
6688 /* Otherwise we will do a vsel afterwards. */
6689 /* This gets triggered e.g.
6690 with gcc.c-torture/compile/pr53410-1.c */
6691 if (!REG_P (then))
6692 then = force_reg (target_mode, then);
6694 if (!REG_P (els))
6695 els = force_reg (target_mode, els);
6697 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6698 result_target,
6699 CONST0_RTX (result_mode));
6701 /* We compared the result against zero above so we have to swap then
6702 and els here. */
6703 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6705 gcc_assert (target_mode == GET_MODE (then));
6706 emit_insn (gen_rtx_SET (target, tmp));
6709 /* Emit the RTX necessary to initialize the vector TARGET with values
6710 in VALS. */
6711 void
6712 s390_expand_vec_init (rtx target, rtx vals)
6714 machine_mode mode = GET_MODE (target);
6715 machine_mode inner_mode = GET_MODE_INNER (mode);
6716 int n_elts = GET_MODE_NUNITS (mode);
6717 bool all_same = true, all_regs = true, all_const_int = true;
6718 rtx x;
6719 int i;
6721 for (i = 0; i < n_elts; ++i)
6723 x = XVECEXP (vals, 0, i);
6725 if (!CONST_INT_P (x))
6726 all_const_int = false;
6728 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6729 all_same = false;
6731 if (!REG_P (x))
6732 all_regs = false;
6735 /* Use vector gen mask or vector gen byte mask if possible. */
6736 if (all_same && all_const_int
6737 && (XVECEXP (vals, 0, 0) == const0_rtx
6738 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6739 NULL, NULL)
6740 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6742 emit_insn (gen_rtx_SET (target,
6743 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6744 return;
6747 if (all_same)
6749 emit_insn (gen_rtx_SET (target,
6750 gen_rtx_VEC_DUPLICATE (mode,
6751 XVECEXP (vals, 0, 0))));
6752 return;
6755 if (all_regs
6756 && REG_P (target)
6757 && n_elts == 2
6758 && GET_MODE_SIZE (inner_mode) == 8)
6760 /* Use vector load pair. */
6761 emit_insn (gen_rtx_SET (target,
6762 gen_rtx_VEC_CONCAT (mode,
6763 XVECEXP (vals, 0, 0),
6764 XVECEXP (vals, 0, 1))));
6765 return;
6768 /* Use vector load logical element and zero. */
6769 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6771 bool found = true;
6773 x = XVECEXP (vals, 0, 0);
6774 if (memory_operand (x, inner_mode))
6776 for (i = 1; i < n_elts; ++i)
6777 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6779 if (found)
6781 machine_mode half_mode = (inner_mode == SFmode
6782 ? V2SFmode : V2SImode);
6783 emit_insn (gen_rtx_SET (target,
6784 gen_rtx_VEC_CONCAT (mode,
6785 gen_rtx_VEC_CONCAT (half_mode,
6786 x,
6787 const0_rtx),
6788 gen_rtx_VEC_CONCAT (half_mode,
6789 const0_rtx,
6790 const0_rtx))));
6791 return;
6796 /* We are about to set the vector elements one by one. Zero out the
6797 full register first in order to help the data flow framework to
6798 detect it as full VR set. */
6799 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6801 /* Unfortunately the vec_init expander is not allowed to fail. So
6802 we have to implement the fallback ourselves. */
6803 for (i = 0; i < n_elts; i++)
6805 rtx elem = XVECEXP (vals, 0, i);
6806 if (!general_operand (elem, GET_MODE (elem)))
6807 elem = force_reg (inner_mode, elem);
6809 emit_insn (gen_rtx_SET (target,
6810 gen_rtx_UNSPEC (mode,
6811 gen_rtvec (3, elem,
6812 GEN_INT (i), target),
6813 UNSPEC_VEC_SET)));
6817 /* Structure to hold the initial parameters for a compare_and_swap operation
6818 in HImode and QImode. */
6820 struct alignment_context
6822 rtx memsi; /* SI aligned memory location. */
6823 rtx shift; /* Bit offset with regard to lsb. */
6824 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6825 rtx modemaski; /* ~modemask */
6826 bool aligned; /* True if memory is aligned, false otherwise. */
6829 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6830 the structure AC for transparent simplification if the memory alignment is known
6831 to be at least 32bit. MEM is the memory location for the actual operation
6832 and MODE its mode. */
6834 static void
6835 init_alignment_context (struct alignment_context *ac, rtx mem,
6836 machine_mode mode)
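/* SHIFT starts out as the distance in bytes of the sub-word value from the
   least significant byte of the containing SImode word: on this big-endian
   target a value at the start of an aligned word is (4 - size) bytes away
   from the LSB.  For unaligned accesses the byte offset within the word is
   subtracted below, and the byte count is converted to bits at the end.  */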
6838 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6839 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6841 if (ac->aligned)
6842 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6843 else
6845 /* Alignment is unknown. */
6846 rtx byteoffset, addr, align;
6848 /* Force the address into a register. */
6849 addr = force_reg (Pmode, XEXP (mem, 0));
6851 /* Align it to SImode. */
6852 align = expand_simple_binop (Pmode, AND, addr,
6853 GEN_INT (-GET_MODE_SIZE (SImode)),
6854 NULL_RTX, 1, OPTAB_DIRECT);
6855 /* Generate MEM. */
6856 ac->memsi = gen_rtx_MEM (SImode, align);
6857 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6858 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6859 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6861 /* Calculate shiftcount. */
6862 byteoffset = expand_simple_binop (Pmode, AND, addr,
6863 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6864 NULL_RTX, 1, OPTAB_DIRECT);
6865 /* As we already have some offset, evaluate the remaining distance. */
6866 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6867 NULL_RTX, 1, OPTAB_DIRECT);
6870 /* Shift is the byte count, but we need the bitcount. */
6871 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6872 NULL_RTX, 1, OPTAB_DIRECT);
6874 /* Calculate masks. */
6875 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6876 GEN_INT (GET_MODE_MASK (mode)),
6877 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6878 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6879 NULL_RTX, 1);
6882 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6883 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6884 perform the merge in SEQ2. */
6886 static rtx
6887 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6888 machine_mode mode, rtx val, rtx ins)
6890 rtx tmp;
6892 if (ac->aligned)
6894 start_sequence ();
6895 tmp = copy_to_mode_reg (SImode, val);
6896 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6897 const0_rtx, ins))
6899 *seq1 = NULL;
6900 *seq2 = get_insns ();
6901 end_sequence ();
6902 return tmp;
6904 end_sequence ();
6907 /* Failed to use insv. Generate a two part shift and mask. */
6908 start_sequence ();
6909 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6910 *seq1 = get_insns ();
6911 end_sequence ();
6913 start_sequence ();
6914 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6915 *seq2 = get_insns ();
6916 end_sequence ();
6918 return tmp;
6921 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6922 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6923 value to set if CMP == MEM. */
6925 static void
6926 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6927 rtx cmp, rtx new_rtx, bool is_weak)
6929 struct alignment_context ac;
6930 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6931 rtx res = gen_reg_rtx (SImode);
6932 rtx_code_label *csloop = NULL, *csend = NULL;
6934 gcc_assert (MEM_P (mem));
6936 init_alignment_context (&ac, mem, mode);
6938 /* Load full word. Subsequent loads are performed by CS. */
6939 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6940 NULL_RTX, 1, OPTAB_DIRECT);
6942 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6943 possible, we try to use insv to make this happen efficiently. If
6944 that fails we'll generate code both inside and outside the loop. */
6945 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6946 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6948 if (seq0)
6949 emit_insn (seq0);
6950 if (seq1)
6951 emit_insn (seq1);
6953 /* Start CS loop. */
6954 if (!is_weak)
6956 /* Begin assuming success. */
6957 emit_move_insn (btarget, const1_rtx);
6959 csloop = gen_label_rtx ();
6960 csend = gen_label_rtx ();
6961 emit_label (csloop);
6964 /* val = "<mem>00..0<mem>"
6965 * cmp = "00..0<cmp>00..0"
6966 * new = "00..0<new>00..0"
6967 */
6969 emit_insn (seq2);
6970 emit_insn (seq3);
6972 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv, CCZ1mode);
6973 if (is_weak)
6974 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6975 else
6977 rtx tmp;
6979 /* Jump to end if we're done (likely?). */
6980 s390_emit_jump (csend, cc);
6982 /* Check for changes outside mode, and loop internally if so.
6983 Arrange the moves so that the compare is adjacent to the
6984 branch so that we can generate CRJ. */
6985 tmp = copy_to_reg (val);
6986 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6987 1, OPTAB_DIRECT);
6988 cc = s390_emit_compare (NE, val, tmp);
6989 s390_emit_jump (csloop, cc);
6991 /* Failed. */
6992 emit_move_insn (btarget, const0_rtx);
6993 emit_label (csend);
6996 /* Return the correct part of the bitfield. */
6997 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6998 NULL_RTX, 1, OPTAB_DIRECT), 1);
7001 /* Variant of s390_expand_cs for SI, DI and TI modes. */
7002 static void
7003 s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7004 rtx cmp, rtx new_rtx, bool is_weak)
7006 rtx output = vtarget;
7007 rtx_code_label *skip_cs_label = NULL;
7008 bool do_const_opt = false;
7010 if (!register_operand (output, mode))
7011 output = gen_reg_rtx (mode);
7013 /* If IS_WEAK is true and the INPUT value is a constant, compare the memory
7014 with the constant first and skip the compare_and_swap because it's very
7015 expensive and likely to fail anyway.
7016 Note 1: This is done only for IS_WEAK. C11 allows optimizations that may
7017 cause spurious failures in that case.
7018 Note 2: It may be useful to do this also for non-constant INPUT.
7019 Note 3: Currently only targets with "load on condition" are supported
7020 (z196 and newer). */
7022 if (TARGET_Z196
7023 && (mode == SImode || mode == DImode))
7024 do_const_opt = (is_weak && CONST_INT_P (cmp));
7026 if (do_const_opt)
7028 rtx cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7030 skip_cs_label = gen_label_rtx ();
7031 emit_move_insn (btarget, const0_rtx);
7032 if (CONST_INT_P (cmp) && INTVAL (cmp) == 0)
7034 rtvec lt = rtvec_alloc (2);
7036 /* Load-and-test + conditional jump. */
7037 RTVEC_ELT (lt, 0)
7038 = gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, mem, cmp));
7039 RTVEC_ELT (lt, 1) = gen_rtx_SET (output, mem);
7040 emit_insn (gen_rtx_PARALLEL (VOIDmode, lt));
7042 else
7044 emit_move_insn (output, mem);
7045 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
7047 s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
7048 add_reg_br_prob_note (get_last_insn (),
7049 profile_probability::very_unlikely ());
7050 /* If the jump is not taken, OUTPUT is the expected value. */
7051 cmp = output;
7052 /* Reload newval to a register manually, *after* the compare and jump
7053 above. Otherwise Reload might place it before the jump. */
7055 else
7056 cmp = force_reg (mode, cmp);
7057 new_rtx = force_reg (mode, new_rtx);
7058 s390_emit_compare_and_swap (EQ, output, mem, cmp, new_rtx,
7059 (do_const_opt) ? CCZmode : CCZ1mode);
7060 if (skip_cs_label != NULL)
7061 emit_label (skip_cs_label);
7063 /* We deliberately accept non-register operands in the predicate
7064 to ensure the write back to the output operand happens *before*
7065 the store-flags code below. This makes it easier for combine
7066 to merge the store-flags code with a potential test-and-branch
7067 pattern following (immediately!) afterwards. */
7068 if (output != vtarget)
7069 emit_move_insn (vtarget, output);
7071 if (do_const_opt)
7073 rtx cc, cond, ite;
7075 /* Do not use gen_cstorecc4 here because it writes either 1 or 0, but
7076 btarget has already been initialized with 0 above. */
7077 cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7078 cond = gen_rtx_EQ (VOIDmode, cc, const0_rtx);
7079 ite = gen_rtx_IF_THEN_ELSE (SImode, cond, const1_rtx, btarget);
7080 emit_insn (gen_rtx_SET (btarget, ite));
7082 else
7084 rtx cc, cond;
7086 cc = gen_rtx_REG (CCZ1mode, CC_REGNUM);
7087 cond = gen_rtx_EQ (SImode, cc, const0_rtx);
7088 emit_insn (gen_cstorecc4 (btarget, cond, cc, const0_rtx));
7092 /* Expand an atomic compare and swap operation. MEM is the memory location,
7093 CMP the old value to compare MEM with and NEW_RTX the value to set if
7094 CMP == MEM. */
7096 void
7097 s390_expand_cs (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7098 rtx cmp, rtx new_rtx, bool is_weak)
7100 switch (mode)
7102 case E_TImode:
7103 case E_DImode:
7104 case E_SImode:
7105 s390_expand_cs_tdsi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7106 break;
7107 case E_HImode:
7108 case E_QImode:
7109 s390_expand_cs_hqi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7110 break;
7111 default:
7112 gcc_unreachable ();
7116 /* Expand an atomic_exchange operation simulated with a compare-and-swap loop.
7117 The memory location MEM is set to INPUT. OUTPUT is set to the previous value
7118 of MEM. */
7120 void
7121 s390_expand_atomic_exchange_tdsi (rtx output, rtx mem, rtx input)
7123 machine_mode mode = GET_MODE (mem);
7124 rtx_code_label *csloop;
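/* Exchanging a constant zero does not need a CS loop: an atomic
   fetch-and-AND with zero clears the memory location while returning the
   old value (on z196 and newer this is expected to map to the interlocked
   LOAD AND AND instruction).  */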
7126 if (TARGET_Z196
7127 && (mode == DImode || mode == SImode)
7128 && CONST_INT_P (input) && INTVAL (input) == 0)
7130 emit_move_insn (output, const0_rtx);
7131 if (mode == DImode)
7132 emit_insn (gen_atomic_fetch_anddi (output, mem, const0_rtx, input));
7133 else
7134 emit_insn (gen_atomic_fetch_andsi (output, mem, const0_rtx, input));
7135 return;
7138 input = force_reg (mode, input);
7139 emit_move_insn (output, mem);
7140 csloop = gen_label_rtx ();
7141 emit_label (csloop);
7142 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, output, mem, output,
7143 input, CCZ1mode));
7146 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
7147 and VAL the value to play with. If AFTER is true then store the value
7148 MEM holds after the operation, if AFTER is false then store the value MEM
7149 holds before the operation. If TARGET is zero then discard that value, else
7150 store it to TARGET. */
7152 void
7153 s390_expand_atomic (machine_mode mode, enum rtx_code code,
7154 rtx target, rtx mem, rtx val, bool after)
7156 struct alignment_context ac;
7157 rtx cmp;
7158 rtx new_rtx = gen_reg_rtx (SImode);
7159 rtx orig = gen_reg_rtx (SImode);
7160 rtx_code_label *csloop = gen_label_rtx ();
7162 gcc_assert (!target || register_operand (target, VOIDmode));
7163 gcc_assert (MEM_P (mem));
7165 init_alignment_context (&ac, mem, mode);
7167 /* Shift val to the correct bit positions.
7168 Preserve "icm", but prevent "ex icm". */
7169 if (!(ac.aligned && code == SET && MEM_P (val)))
7170 val = s390_expand_mask_and_shift (val, mode, ac.shift);
7172 /* Further preparation insns. */
7173 if (code == PLUS || code == MINUS)
7174 emit_move_insn (orig, val);
7175 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
7176 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
7177 NULL_RTX, 1, OPTAB_DIRECT);
7179 /* Load full word. Subsequent loads are performed by CS. */
7180 cmp = force_reg (SImode, ac.memsi);
7182 /* Start CS loop. */
7183 emit_label (csloop);
7184 emit_move_insn (new_rtx, cmp);
7186 /* Patch new with val at correct position. */
7187 switch (code)
7189 case PLUS:
7190 case MINUS:
7191 val = expand_simple_binop (SImode, code, new_rtx, orig,
7192 NULL_RTX, 1, OPTAB_DIRECT);
7193 val = expand_simple_binop (SImode, AND, val, ac.modemask,
7194 NULL_RTX, 1, OPTAB_DIRECT);
7195 /* FALLTHRU */
7196 case SET:
7197 if (ac.aligned && MEM_P (val))
7198 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
7199 0, 0, SImode, val, false);
7200 else
7202 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
7203 NULL_RTX, 1, OPTAB_DIRECT);
7204 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
7205 NULL_RTX, 1, OPTAB_DIRECT);
7207 break;
7208 case AND:
7209 case IOR:
7210 case XOR:
7211 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
7212 NULL_RTX, 1, OPTAB_DIRECT);
7213 break;
7214 case MULT: /* NAND */
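/* NAND: AND the new bits into the loaded word first, then flip the field
   by XORing with the field mask.  Bits outside the field are 1 in VAL
   (see the preparation above), so they pass through the AND unchanged and
   are left untouched by the XOR.  */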
7215 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
7216 NULL_RTX, 1, OPTAB_DIRECT);
7217 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
7218 NULL_RTX, 1, OPTAB_DIRECT);
7219 break;
7220 default:
7221 gcc_unreachable ();
7224 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
7225 ac.memsi, cmp, new_rtx,
7226 CCZ1mode));
7228 /* Return the correct part of the bitfield. */
7229 if (target)
7230 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
7231 after ? new_rtx : cmp, ac.shift,
7232 NULL_RTX, 1, OPTAB_DIRECT), 1);
7235 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7236 We need to emit DTP-relative relocations. */
7238 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7240 static void
7241 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
7243 switch (size)
7245 case 4:
7246 fputs ("\t.long\t", file);
7247 break;
7248 case 8:
7249 fputs ("\t.quad\t", file);
7250 break;
7251 default:
7252 gcc_unreachable ();
7254 output_addr_const (file, x);
7255 fputs ("@DTPOFF", file);
7258 /* Return the proper mode for REGNO being represented in the dwarf
7259 unwind table. */
7260 machine_mode
7261 s390_dwarf_frame_reg_mode (int regno)
7263 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
7265 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
7266 if (GENERAL_REGNO_P (regno))
7267 save_mode = Pmode;
7269 /* The rightmost 64 bits of vector registers are call-clobbered. */
7270 if (GET_MODE_SIZE (save_mode) > 8)
7271 save_mode = DImode;
7273 return save_mode;
7276 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
7277 /* Implement TARGET_MANGLE_TYPE. */
7279 static const char *
7280 s390_mangle_type (const_tree type)
7282 type = TYPE_MAIN_VARIANT (type);
7284 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
7285 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
7286 return NULL;
7288 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
7289 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
7290 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
7291 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
7293 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
7294 && TARGET_LONG_DOUBLE_128)
7295 return "g";
7297 /* For all other types, use normal C++ mangling. */
7298 return NULL;
7300 #endif
7302 /* In the name of slightly smaller debug output, and to cater to
7303 general assembler lossage, recognize various UNSPEC sequences
7304 and turn them back into a direct symbol reference. */
7306 static rtx
7307 s390_delegitimize_address (rtx orig_x)
7309 rtx x, y;
7311 orig_x = delegitimize_mem_from_attrs (orig_x);
7312 x = orig_x;
7314 /* Extract the symbol ref from:
7315 (plus:SI (reg:SI 12 %r12)
7316 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7317 UNSPEC_GOTOFF/PLTOFF)))
7318 or
7319 (plus:SI (reg:SI 12 %r12)
7320 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7321 UNSPEC_GOTOFF/PLTOFF)
7322 (const_int 4 [0x4])))) */
7323 if (GET_CODE (x) == PLUS
7324 && REG_P (XEXP (x, 0))
7325 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7326 && GET_CODE (XEXP (x, 1)) == CONST)
7328 HOST_WIDE_INT offset = 0;
7330 /* The const operand. */
7331 y = XEXP (XEXP (x, 1), 0);
7333 if (GET_CODE (y) == PLUS
7334 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7336 offset = INTVAL (XEXP (y, 1));
7337 y = XEXP (y, 0);
7340 if (GET_CODE (y) == UNSPEC
7341 && (XINT (y, 1) == UNSPEC_GOTOFF
7342 || XINT (y, 1) == UNSPEC_PLTOFF))
7343 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7346 if (GET_CODE (x) != MEM)
7347 return orig_x;
7349 x = XEXP (x, 0);
7350 if (GET_CODE (x) == PLUS
7351 && GET_CODE (XEXP (x, 1)) == CONST
7352 && GET_CODE (XEXP (x, 0)) == REG
7353 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7355 y = XEXP (XEXP (x, 1), 0);
7356 if (GET_CODE (y) == UNSPEC
7357 && XINT (y, 1) == UNSPEC_GOT)
7358 y = XVECEXP (y, 0, 0);
7359 else
7360 return orig_x;
7362 else if (GET_CODE (x) == CONST)
7364 /* Extract the symbol ref from:
7365 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7366 UNSPEC_PLT/GOTENT))) */
7368 y = XEXP (x, 0);
7369 if (GET_CODE (y) == UNSPEC
7370 && (XINT (y, 1) == UNSPEC_GOTENT
7371 || XINT (y, 1) == UNSPEC_PLT))
7372 y = XVECEXP (y, 0, 0);
7373 else
7374 return orig_x;
7376 else
7377 return orig_x;
7379 if (GET_MODE (orig_x) != Pmode)
7381 if (GET_MODE (orig_x) == BLKmode)
7382 return orig_x;
7383 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7384 if (y == NULL_RTX)
7385 return orig_x;
7387 return y;
7390 /* Output operand OP to stdio stream FILE.
7391 OP is an address (register + offset) which is not used to address data;
7392 instead the rightmost bits are interpreted as the value. */
7394 static void
7395 print_addrstyle_operand (FILE *file, rtx op)
7397 HOST_WIDE_INT offset;
7398 rtx base;
7400 /* Extract base register and offset. */
7401 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7402 gcc_unreachable ();
7404 /* Sanity check. */
7405 if (base)
7407 gcc_assert (GET_CODE (base) == REG);
7408 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7409 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7412 /* Offsets are restricted to twelve bits. */
7413 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7414 if (base)
7415 fprintf (file, "(%s)", reg_names[REGNO (base)]);
7418 /* Assigns the number of NOP halfwords to be emitted before and after the
7419 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
7420 If hotpatching is disabled for the function, the values are set to zero.
7421 */
7423 static void
7424 s390_function_num_hotpatch_hw (tree decl,
7425 int *hw_before,
7426 int *hw_after)
7428 tree attr;
7430 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7432 /* Handle the arguments of the hotpatch attribute. The values
7433 specified via attribute might override the cmdline argument
7434 values. */
7435 if (attr)
7437 tree args = TREE_VALUE (attr);
7439 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7440 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7442 else
7444 /* Use the values specified by the cmdline arguments. */
7445 *hw_before = s390_hotpatch_hw_before_label;
7446 *hw_after = s390_hotpatch_hw_after_label;
7450 /* Write the current .machine and .machinemode specification to the assembler
7451 file. */
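/* For example, with -march=z13 -mzarch -mno-htm this is expected to produce
	.machinemode zarch
	.machine "z13+nohtm"
   assuming the assembler supports the architecture modifier syntax.  */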
7453 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7454 static void
7455 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7457 fprintf (asm_out_file, "\t.machinemode %s\n",
7458 (TARGET_ZARCH) ? "zarch" : "esa");
7459 fprintf (asm_out_file, "\t.machine \"%s",
7460 processor_table[s390_arch].binutils_name);
7461 if (S390_USE_ARCHITECTURE_MODIFIERS)
7463 int cpu_flags;
7465 cpu_flags = processor_flags_table[(int) s390_arch];
7466 if (TARGET_HTM && !(cpu_flags & PF_TX))
7467 fprintf (asm_out_file, "+htm");
7468 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7469 fprintf (asm_out_file, "+nohtm");
7470 if (TARGET_VX && !(cpu_flags & PF_VX))
7471 fprintf (asm_out_file, "+vx");
7472 else if (!TARGET_VX && (cpu_flags & PF_VX))
7473 fprintf (asm_out_file, "+novx");
7475 fprintf (asm_out_file, "\"\n");
7478 /* Write an extra function header before the very start of the function. */
7480 void
7481 s390_asm_output_function_prefix (FILE *asm_out_file,
7482 const char *fnname ATTRIBUTE_UNUSED)
7484 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7485 return;
7486 /* Since only the function specific options are saved but not the indications
7487 which options are set, it's too much work here to figure out which options
7488 have actually changed. Thus, generate .machine and .machinemode whenever a
7489 function has the target attribute or pragma. */
7490 fprintf (asm_out_file, "\t.machinemode push\n");
7491 fprintf (asm_out_file, "\t.machine push\n");
7492 s390_asm_output_machine_for_arch (asm_out_file);
7495 /* Write an extra function footer after the very end of the function. */
7497 void
7498 s390_asm_declare_function_size (FILE *asm_out_file,
7499 const char *fnname, tree decl)
7501 if (!flag_inhibit_size_directive)
7502 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7503 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7504 return;
7505 fprintf (asm_out_file, "\t.machine pop\n");
7506 fprintf (asm_out_file, "\t.machinemode pop\n");
7508 #endif
7510 /* Write the extra assembler code needed to declare a function properly. */
7512 void
7513 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7514 tree decl)
7516 int hw_before, hw_after;
7518 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7519 if (hw_before > 0)
7521 unsigned int function_alignment;
7522 int i;
7524 /* Add a trampoline code area before the function label and initialize it
7525 with two-byte nop instructions. This area can be overwritten with code
7526 that jumps to a patched version of the function. */
7527 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7528 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7529 hw_before);
7530 for (i = 1; i < hw_before; i++)
7531 fputs ("\tnopr\t%r0\n", asm_out_file);
7533 /* Note: The function label must be aligned so that (a) the bytes of the
7534 following nop do not cross a cacheline boundary, and (b) a jump address
7535 (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
7536 stored directly before the label without crossing a cacheline
7537 boundary. All this is necessary to make sure the trampoline code can
7538 be changed atomically.
7539 This alignment is done automatically using the FUNCTION_BOUNDARY, but
7540 if there are NOPs before the function label, the alignment is placed
7541 before them. So it is necessary to duplicate the alignment after the
7542 NOPs. */
7543 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7544 if (! DECL_USER_ALIGN (decl))
7545 function_alignment = MAX (function_alignment,
7546 (unsigned int) align_functions_max_skip + 1);
7547 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7548 ASM_OUTPUT_ALIGN (asm_out_file, align_functions_log);
7551 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7553 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7554 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7555 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7556 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7557 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7558 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7559 s390_warn_framesize);
7560 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7561 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7562 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7563 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7564 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7565 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7566 TARGET_PACKED_STACK);
7567 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7568 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7569 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7570 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7571 s390_warn_dynamicstack_p);
7573 ASM_OUTPUT_LABEL (asm_out_file, fname);
7574 if (hw_after > 0)
7575 asm_fprintf (asm_out_file,
7576 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7577 hw_after);
7580 /* Output machine-dependent UNSPECs occurring in address constant X
7581 in assembler syntax to stdio stream FILE. Returns true if the
7582 constant X could be recognized, false otherwise. */
7584 static bool
7585 s390_output_addr_const_extra (FILE *file, rtx x)
7587 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7588 switch (XINT (x, 1))
7590 case UNSPEC_GOTENT:
7591 output_addr_const (file, XVECEXP (x, 0, 0));
7592 fprintf (file, "@GOTENT");
7593 return true;
7594 case UNSPEC_GOT:
7595 output_addr_const (file, XVECEXP (x, 0, 0));
7596 fprintf (file, "@GOT");
7597 return true;
7598 case UNSPEC_GOTOFF:
7599 output_addr_const (file, XVECEXP (x, 0, 0));
7600 fprintf (file, "@GOTOFF");
7601 return true;
7602 case UNSPEC_PLT:
7603 output_addr_const (file, XVECEXP (x, 0, 0));
7604 fprintf (file, "@PLT");
7605 return true;
7606 case UNSPEC_PLTOFF:
7607 output_addr_const (file, XVECEXP (x, 0, 0));
7608 fprintf (file, "@PLTOFF");
7609 return true;
7610 case UNSPEC_TLSGD:
7611 output_addr_const (file, XVECEXP (x, 0, 0));
7612 fprintf (file, "@TLSGD");
7613 return true;
7614 case UNSPEC_TLSLDM:
7615 assemble_name (file, get_some_local_dynamic_name ());
7616 fprintf (file, "@TLSLDM");
7617 return true;
7618 case UNSPEC_DTPOFF:
7619 output_addr_const (file, XVECEXP (x, 0, 0));
7620 fprintf (file, "@DTPOFF");
7621 return true;
7622 case UNSPEC_NTPOFF:
7623 output_addr_const (file, XVECEXP (x, 0, 0));
7624 fprintf (file, "@NTPOFF");
7625 return true;
7626 case UNSPEC_GOTNTPOFF:
7627 output_addr_const (file, XVECEXP (x, 0, 0));
7628 fprintf (file, "@GOTNTPOFF");
7629 return true;
7630 case UNSPEC_INDNTPOFF:
7631 output_addr_const (file, XVECEXP (x, 0, 0));
7632 fprintf (file, "@INDNTPOFF");
7633 return true;
7636 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7637 switch (XINT (x, 1))
7639 case UNSPEC_POOL_OFFSET:
7640 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7641 output_addr_const (file, x);
7642 return true;
7644 return false;
7647 /* Output address operand ADDR in assembler syntax to
7648 stdio stream FILE. */
7650 void
7651 print_operand_address (FILE *file, rtx addr)
7653 struct s390_address ad;
7654 memset (&ad, 0, sizeof (s390_address));
7656 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7658 if (!TARGET_Z10)
7660 output_operand_lossage ("symbolic memory references are "
7661 "only supported on z10 or later");
7662 return;
7664 output_addr_const (file, addr);
7665 return;
7668 if (!s390_decompose_address (addr, &ad)
7669 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7670 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7671 output_operand_lossage ("cannot decompose address");
7673 if (ad.disp)
7674 output_addr_const (file, ad.disp);
7675 else
7676 fprintf (file, "0");
7678 if (ad.base && ad.indx)
7679 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7680 reg_names[REGNO (ad.base)]);
7681 else if (ad.base)
7682 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7685 /* Output operand X in assembler syntax to stdio stream FILE.
7686 CODE specified the format flag. The following format flags
7687 are recognized:
7689 'C': print opcode suffix for branch condition.
7690 'D': print opcode suffix for inverse branch condition.
7691 'E': print opcode suffix for branch on index instruction.
7692 'G': print the size of the operand in bytes.
7693 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7694 'M': print the second word of a TImode operand.
7695 'N': print the second word of a DImode operand.
7696 'O': print only the displacement of a memory reference or address.
7697 'R': print only the base register of a memory reference or address.
7698 'S': print S-type memory reference (base+displacement).
7699 'Y': print address style operand without index (e.g. shift count or setmem
7700 operand).
7702 'b': print integer X as if it's an unsigned byte.
7703 'c': print integer X as if it's a signed byte.
7704 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7705 'f': "end" contiguous bitmask X in SImode.
7706 'h': print integer X as if it's a signed halfword.
7707 'i': print the first nonzero HImode part of X.
7708 'j': print the first HImode part unequal to -1 of X.
7709 'k': print the first nonzero SImode part of X.
7710 'm': print the first SImode part unequal to -1 of X.
7711 'o': print integer X as if it's an unsigned 32bit word.
7712 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7713 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7714 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7715 'x': print integer X as if it's an unsigned halfword.
7716 'v': print register number as vector register (v1 instead of f1).
7717 */
7719 void
7720 print_operand (FILE *file, rtx x, int code)
7722 HOST_WIDE_INT ival;
7724 switch (code)
7726 case 'C':
7727 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7728 return;
7730 case 'D':
7731 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7732 return;
7734 case 'E':
7735 if (GET_CODE (x) == LE)
7736 fprintf (file, "l");
7737 else if (GET_CODE (x) == GT)
7738 fprintf (file, "h");
7739 else
7740 output_operand_lossage ("invalid comparison operator "
7741 "for 'E' output modifier");
7742 return;
7744 case 'J':
7745 if (GET_CODE (x) == SYMBOL_REF)
7747 fprintf (file, "%s", ":tls_load:");
7748 output_addr_const (file, x);
7750 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7752 fprintf (file, "%s", ":tls_gdcall:");
7753 output_addr_const (file, XVECEXP (x, 0, 0));
7755 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7757 fprintf (file, "%s", ":tls_ldcall:");
7758 const char *name = get_some_local_dynamic_name ();
7759 gcc_assert (name);
7760 assemble_name (file, name);
7762 else
7763 output_operand_lossage ("invalid reference for 'J' output modifier");
7764 return;
7766 case 'G':
7767 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7768 return;
7770 case 'O':
7772 struct s390_address ad;
7773 int ret;
7775 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7777 if (!ret
7778 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7779 || ad.indx)
7781 output_operand_lossage ("invalid address for 'O' output modifier");
7782 return;
7785 if (ad.disp)
7786 output_addr_const (file, ad.disp);
7787 else
7788 fprintf (file, "0");
7790 return;
7792 case 'R':
7794 struct s390_address ad;
7795 int ret;
7797 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7799 if (!ret
7800 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7801 || ad.indx)
7803 output_operand_lossage ("invalid address for 'R' output modifier");
7804 return;
7807 if (ad.base)
7808 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7809 else
7810 fprintf (file, "0");
7812 return;
7814 case 'S':
7816 struct s390_address ad;
7817 int ret;
7819 if (!MEM_P (x))
7821 output_operand_lossage ("memory reference expected for "
7822 "'S' output modifier");
7823 return;
7825 ret = s390_decompose_address (XEXP (x, 0), &ad);
7827 if (!ret
7828 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7829 || ad.indx)
7831 output_operand_lossage ("invalid address for 'S' output modifier");
7832 return;
7835 if (ad.disp)
7836 output_addr_const (file, ad.disp);
7837 else
7838 fprintf (file, "0");
7840 if (ad.base)
7841 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7843 return;
7845 case 'N':
7846 if (GET_CODE (x) == REG)
7847 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7848 else if (GET_CODE (x) == MEM)
7849 x = change_address (x, VOIDmode,
7850 plus_constant (Pmode, XEXP (x, 0), 4));
7851 else
7852 output_operand_lossage ("register or memory expression expected "
7853 "for 'N' output modifier");
7854 break;
7856 case 'M':
7857 if (GET_CODE (x) == REG)
7858 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7859 else if (GET_CODE (x) == MEM)
7860 x = change_address (x, VOIDmode,
7861 plus_constant (Pmode, XEXP (x, 0), 8));
7862 else
7863 output_operand_lossage ("register or memory expression expected "
7864 "for 'M' output modifier");
7865 break;
7867 case 'Y':
7868 print_addrstyle_operand (file, x);
7869 return;
7872 switch (GET_CODE (x))
7874 case REG:
7875 /* Print FP regs as fx instead of vx when they are accessed
7876 through non-vector mode. */
7877 if (code == 'v'
7878 || VECTOR_NOFP_REG_P (x)
7879 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7880 || (VECTOR_REG_P (x)
7881 && (GET_MODE_SIZE (GET_MODE (x)) /
7882 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7883 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
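	  /* (reg_names[] for these registers is of the form "%fN" or "%vN";
	     the "+ 2" above skips that two-character prefix so only the
	     register number is printed after the "%v".)  */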
7884 else
7885 fprintf (file, "%s", reg_names[REGNO (x)]);
7886 break;
7888 case MEM:
7889 output_address (GET_MODE (x), XEXP (x, 0));
7890 break;
7892 case CONST:
7893 case CODE_LABEL:
7894 case LABEL_REF:
7895 case SYMBOL_REF:
7896 output_addr_const (file, x);
7897 break;
7899 case CONST_INT:
7900 ival = INTVAL (x);
7901 switch (code)
7903 case 0:
7904 break;
7905 case 'b':
7906 ival &= 0xff;
7907 break;
7908 case 'c':
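	  /* Sign-extend the low byte: mask to 8 bits, flip the sign bit and
	     subtract 0x80 (e.g. 0xff becomes -1).  */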
7909 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7910 break;
7911 case 'x':
7912 ival &= 0xffff;
7913 break;
7914 case 'h':
7915 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7916 break;
7917 case 'i':
7918 ival = s390_extract_part (x, HImode, 0);
7919 break;
7920 case 'j':
7921 ival = s390_extract_part (x, HImode, -1);
7922 break;
7923 case 'k':
7924 ival = s390_extract_part (x, SImode, 0);
7925 break;
7926 case 'm':
7927 ival = s390_extract_part (x, SImode, -1);
7928 break;
7929 case 'o':
7930 ival &= 0xffffffff;
7931 break;
7932 case 'e': case 'f':
7933 case 's': case 't':
7935 int start, end;
7936 int len;
7937 bool ok;
7939 len = (code == 's' || code == 'e' ? 64 : 32);
7940 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7941 gcc_assert (ok);
7942 if (code == 's' || code == 't')
7943 ival = start;
7944 else
7945 ival = end;
7947 break;
7948 default:
7949 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7951 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7952 break;
7954 case CONST_WIDE_INT:
7955 if (code == 'b')
7956 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7957 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7958 else if (code == 'x')
7959 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7960 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7961 else if (code == 'h')
7962 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7963 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7964 else
7966 if (code == 0)
7967 output_operand_lossage ("invalid constant - try using "
7968 "an output modifier");
7969 else
7970 output_operand_lossage ("invalid constant for output modifier '%c'",
7971 code);
7973 break;
7974 case CONST_VECTOR:
7975 switch (code)
7977 case 'h':
7978 gcc_assert (const_vec_duplicate_p (x));
7979 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7980 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7981 break;
7982 case 'e':
7983 case 's':
7985 int start, end;
7986 bool ok;
7988 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7989 gcc_assert (ok);
7990 ival = (code == 's') ? start : end;
7991 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7993 break;
7994 case 't':
7996 unsigned mask;
7997 bool ok = s390_bytemask_vector_p (x, &mask);
7998 gcc_assert (ok);
7999 fprintf (file, "%u", mask);
8001 break;
8003 default:
8004 output_operand_lossage ("invalid constant vector for output "
8005 "modifier '%c'", code);
8007 break;
8009 default:
8010 if (code == 0)
8011 output_operand_lossage ("invalid expression - try using "
8012 "an output modifier");
8013 else
8014 output_operand_lossage ("invalid expression for output "
8015 "modifier '%c'", code);
8016 break;
8020 /* Target hook for assembling integer objects. We need to define it
8021 here to work around a bug in some versions of GAS, which couldn't
8022 handle values smaller than INT_MIN when printed in decimal. */
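/* (For instance, the most negative 64-bit value is then emitted as ".quad"
   followed by its hexadecimal representation rather than a decimal
   constant.)  */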
8024 static bool
8025 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
8027 if (size == 8 && aligned_p
8028 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
8030 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
8031 INTVAL (x));
8032 return true;
8034 return default_assemble_integer (x, size, aligned_p);
8037 /* Returns true if register REGNO is used for forming
8038 a memory address in expression X. */
8040 static bool
8041 reg_used_in_mem_p (int regno, rtx x)
8043 enum rtx_code code = GET_CODE (x);
8044 int i, j;
8045 const char *fmt;
8047 if (code == MEM)
8049 if (refers_to_regno_p (regno, XEXP (x, 0)))
8050 return true;
8052 else if (code == SET
8053 && GET_CODE (SET_DEST (x)) == PC)
8055 if (refers_to_regno_p (regno, SET_SRC (x)))
8056 return true;
8059 fmt = GET_RTX_FORMAT (code);
8060 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8062 if (fmt[i] == 'e'
8063 && reg_used_in_mem_p (regno, XEXP (x, i)))
8064 return true;
8066 else if (fmt[i] == 'E')
8067 for (j = 0; j < XVECLEN (x, i); j++)
8068 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
8069 return true;
8071 return false;
8074 /* Returns true if expression DEP_RTX sets an address register
8075 used by instruction INSN to address memory. */
8077 static bool
8078 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
8080 rtx target, pat;
8082 if (NONJUMP_INSN_P (dep_rtx))
8083 dep_rtx = PATTERN (dep_rtx);
8085 if (GET_CODE (dep_rtx) == SET)
8087 target = SET_DEST (dep_rtx);
8088 if (GET_CODE (target) == STRICT_LOW_PART)
8089 target = XEXP (target, 0);
8090 while (GET_CODE (target) == SUBREG)
8091 target = SUBREG_REG (target);
8093 if (GET_CODE (target) == REG)
8095 int regno = REGNO (target);
8097 if (s390_safe_attr_type (insn) == TYPE_LA)
8099 pat = PATTERN (insn);
8100 if (GET_CODE (pat) == PARALLEL)
8102 gcc_assert (XVECLEN (pat, 0) == 2);
8103 pat = XVECEXP (pat, 0, 0);
8105 gcc_assert (GET_CODE (pat) == SET);
8106 return refers_to_regno_p (regno, SET_SRC (pat));
8108 else if (get_attr_atype (insn) == ATYPE_AGEN)
8109 return reg_used_in_mem_p (regno, PATTERN (insn));
8112 return false;
8115 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
8118 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
8120 rtx dep_rtx = PATTERN (dep_insn);
8121 int i;
8123 if (GET_CODE (dep_rtx) == SET
8124 && addr_generation_dependency_p (dep_rtx, insn))
8125 return 1;
8126 else if (GET_CODE (dep_rtx) == PARALLEL)
8128 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
8130 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
8131 return 1;
8134 return 0;
8138 /* A C statement (sans semicolon) to update the integer scheduling priority
8139 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
8140 reduce the priority to execute INSN later. Do not define this macro if
8141 you do not need to adjust the scheduling priorities of insns.
8143 A STD instruction should be scheduled earlier,
8144 in order to use the bypass. */
8145 static int
8146 s390_adjust_priority (rtx_insn *insn, int priority)
8148 if (! INSN_P (insn))
8149 return priority;
8151 if (s390_tune <= PROCESSOR_2064_Z900)
8152 return priority;
8154 switch (s390_safe_attr_type (insn))
8156 case TYPE_FSTOREDF:
8157 case TYPE_FSTORESF:
8158 priority = priority << 3;
8159 break;
8160 case TYPE_STORE:
8161 case TYPE_STM:
8162 priority = priority << 1;
8163 break;
8164 default:
8165 break;
8167 return priority;
8171 /* The number of instructions that can be issued per cycle. */
8173 static int
8174 s390_issue_rate (void)
8176 switch (s390_tune)
8178 case PROCESSOR_2084_Z990:
8179 case PROCESSOR_2094_Z9_109:
8180 case PROCESSOR_2094_Z9_EC:
8181 case PROCESSOR_2817_Z196:
8182 return 3;
8183 case PROCESSOR_2097_Z10:
8184 return 2;
8185 case PROCESSOR_9672_G5:
8186 case PROCESSOR_9672_G6:
8187 case PROCESSOR_2064_Z900:
8188 /* Starting with EC12 we use the sched_reorder hook to take care
8189 of instruction dispatch constraints. The algorithm only
8190 picks the best instruction and assumes only a single
8191 instruction gets issued per cycle. */
8192 case PROCESSOR_2827_ZEC12:
8193 case PROCESSOR_2964_Z13:
8194 case PROCESSOR_3906_Z14:
8195 default:
8196 return 1;
8200 static int
8201 s390_first_cycle_multipass_dfa_lookahead (void)
8203 return 4;
8206 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
8207 Fix up MEMs as required. */
8209 static void
8210 annotate_constant_pool_refs (rtx *x)
8212 int i, j;
8213 const char *fmt;
8215 gcc_assert (GET_CODE (*x) != SYMBOL_REF
8216 || !CONSTANT_POOL_ADDRESS_P (*x));
8218 /* Literal pool references can only occur inside a MEM ... */
8219 if (GET_CODE (*x) == MEM)
8221 rtx memref = XEXP (*x, 0);
8223 if (GET_CODE (memref) == SYMBOL_REF
8224 && CONSTANT_POOL_ADDRESS_P (memref))
8226 rtx base = cfun->machine->base_reg;
8227 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
8228 UNSPEC_LTREF);
8230 *x = replace_equiv_address (*x, addr);
8231 return;
8234 if (GET_CODE (memref) == CONST
8235 && GET_CODE (XEXP (memref, 0)) == PLUS
8236 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
8237 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
8238 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
8240 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
8241 rtx sym = XEXP (XEXP (memref, 0), 0);
8242 rtx base = cfun->machine->base_reg;
8243 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8244 UNSPEC_LTREF);
8246 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
8247 return;
8251 /* ... or a load-address type pattern. */
8252 if (GET_CODE (*x) == SET)
8254 rtx addrref = SET_SRC (*x);
8256 if (GET_CODE (addrref) == SYMBOL_REF
8257 && CONSTANT_POOL_ADDRESS_P (addrref))
8259 rtx base = cfun->machine->base_reg;
8260 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
8261 UNSPEC_LTREF);
8263 SET_SRC (*x) = addr;
8264 return;
8267 if (GET_CODE (addrref) == CONST
8268 && GET_CODE (XEXP (addrref, 0)) == PLUS
8269 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
8270 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
8271 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
8273 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
8274 rtx sym = XEXP (XEXP (addrref, 0), 0);
8275 rtx base = cfun->machine->base_reg;
8276 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8277 UNSPEC_LTREF);
8279 SET_SRC (*x) = plus_constant (Pmode, addr, off);
8280 return;
8284 /* Annotate LTREL_BASE as well. */
8285 if (GET_CODE (*x) == UNSPEC
8286 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8288 rtx base = cfun->machine->base_reg;
8289 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
8290 UNSPEC_LTREL_BASE);
8291 return;
8294 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8295 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8297 if (fmt[i] == 'e')
8299 annotate_constant_pool_refs (&XEXP (*x, i));
8301 else if (fmt[i] == 'E')
8303 for (j = 0; j < XVECLEN (*x, i); j++)
8304 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
8309 /* Split all branches that exceed the maximum distance.
8310 Returns true if this created a new literal pool entry. */
8312 static int
8313 s390_split_branches (void)
8315 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8316 int new_literal = 0, ret;
8317 rtx_insn *insn;
8318 rtx pat, target;
8319 rtx *label;
8321 /* We need correct insn addresses. */
8323 shorten_branches (get_insns ());
8325 /* Find all branches that exceed 64KB, and split them. */
8327 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8329 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8330 continue;
8332 pat = PATTERN (insn);
8333 if (GET_CODE (pat) == PARALLEL)
8334 pat = XVECEXP (pat, 0, 0);
8335 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8336 continue;
8338 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8340 label = &SET_SRC (pat);
8342 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8344 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8345 label = &XEXP (SET_SRC (pat), 1);
8346 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8347 label = &XEXP (SET_SRC (pat), 2);
8348 else
8349 continue;
8351 else
8352 continue;
8354 if (get_attr_length (insn) <= 4)
8355 continue;
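      /* (Only branches longer than 4 bytes need splitting: a 4-byte relative
	 branch has a signed 16-bit halfword offset and thus already reaches
	 targets within +-64K.)  */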
8357 /* We are going to use the return register as a scratch register;
8358 make sure it will be saved/restored by the prologue/epilogue. */
8359 cfun_frame_layout.save_return_addr_p = 1;
8361 if (!flag_pic)
8363 new_literal = 1;
8364 rtx mem = force_const_mem (Pmode, *label);
8365 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8366 insn);
8367 INSN_ADDRESSES_NEW (set_insn, -1);
8368 annotate_constant_pool_refs (&PATTERN (set_insn));
8370 target = temp_reg;
8372 else
8374 new_literal = 1;
8375 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8376 UNSPEC_LTREL_OFFSET);
8377 target = gen_rtx_CONST (Pmode, target);
8378 target = force_const_mem (Pmode, target);
8379 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8380 insn);
8381 INSN_ADDRESSES_NEW (set_insn, -1);
8382 annotate_constant_pool_refs (&PATTERN (set_insn));
8384 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8385 cfun->machine->base_reg),
8386 UNSPEC_LTREL_BASE);
8387 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8390 ret = validate_change (insn, label, target, 0);
8391 gcc_assert (ret);
8394 return new_literal;
8398 /* Find an annotated literal pool symbol referenced in RTX X,
8399 and store it at REF. Will abort if X contains references to
8400 more than one such pool symbol; multiple references to the same
8401 symbol are allowed, however.
8403 The rtx pointed to by REF must be initialized to NULL_RTX
8404 by the caller before calling this routine. */
8406 static void
8407 find_constant_pool_ref (rtx x, rtx *ref)
8409 int i, j;
8410 const char *fmt;
8412 /* Ignore LTREL_BASE references. */
8413 if (GET_CODE (x) == UNSPEC
8414 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8415 return;
8416 /* Likewise POOL_ENTRY insns. */
8417 if (GET_CODE (x) == UNSPEC_VOLATILE
8418 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8419 return;
8421 gcc_assert (GET_CODE (x) != SYMBOL_REF
8422 || !CONSTANT_POOL_ADDRESS_P (x));
8424 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8426 rtx sym = XVECEXP (x, 0, 0);
8427 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8428 && CONSTANT_POOL_ADDRESS_P (sym));
8430 if (*ref == NULL_RTX)
8431 *ref = sym;
8432 else
8433 gcc_assert (*ref == sym);
8435 return;
8438 fmt = GET_RTX_FORMAT (GET_CODE (x));
8439 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8441 if (fmt[i] == 'e')
8443 find_constant_pool_ref (XEXP (x, i), ref);
8445 else if (fmt[i] == 'E')
8447 for (j = 0; j < XVECLEN (x, i); j++)
8448 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8453 /* Replace every reference to the annotated literal pool
8454 symbol REF in X by its base plus OFFSET. */
8456 static void
8457 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8459 int i, j;
8460 const char *fmt;
8462 gcc_assert (*x != ref);
8464 if (GET_CODE (*x) == UNSPEC
8465 && XINT (*x, 1) == UNSPEC_LTREF
8466 && XVECEXP (*x, 0, 0) == ref)
8468 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8469 return;
8472 if (GET_CODE (*x) == PLUS
8473 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8474 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8475 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8476 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8478 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8479 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8480 return;
8483 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8484 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8486 if (fmt[i] == 'e')
8488 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8490 else if (fmt[i] == 'E')
8492 for (j = 0; j < XVECLEN (*x, i); j++)
8493 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8498 /* Check whether X contains an UNSPEC_LTREL_BASE.
8499 Return its constant pool symbol if found, NULL_RTX otherwise. */
8501 static rtx
8502 find_ltrel_base (rtx x)
8504 int i, j;
8505 const char *fmt;
8507 if (GET_CODE (x) == UNSPEC
8508 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8509 return XVECEXP (x, 0, 0);
8511 fmt = GET_RTX_FORMAT (GET_CODE (x));
8512 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8514 if (fmt[i] == 'e')
8516 rtx fnd = find_ltrel_base (XEXP (x, i));
8517 if (fnd)
8518 return fnd;
8520 else if (fmt[i] == 'E')
8522 for (j = 0; j < XVECLEN (x, i); j++)
8524 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8525 if (fnd)
8526 return fnd;
8531 return NULL_RTX;
8534 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8536 static void
8537 replace_ltrel_base (rtx *x)
8539 int i, j;
8540 const char *fmt;
8542 if (GET_CODE (*x) == UNSPEC
8543 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8545 *x = XVECEXP (*x, 0, 1);
8546 return;
8549 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8550 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8552 if (fmt[i] == 'e')
8554 replace_ltrel_base (&XEXP (*x, i));
8556 else if (fmt[i] == 'E')
8558 for (j = 0; j < XVECLEN (*x, i); j++)
8559 replace_ltrel_base (&XVECEXP (*x, i, j));
8565 /* We keep a list of constants which we have to add to internal
8566 constant tables in the middle of large functions. */
8568 #define NR_C_MODES 32
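/* The modes below appear to be ordered from largest to smallest so that
   s390_dump_pool can emit the pool entries in decreasing alignment order.  */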
8569 machine_mode constant_modes[NR_C_MODES] =
8571 TFmode, TImode, TDmode,
8572 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8573 V4SFmode, V2DFmode, V1TFmode,
8574 DFmode, DImode, DDmode,
8575 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8576 SFmode, SImode, SDmode,
8577 V4QImode, V2HImode, V1SImode, V1SFmode,
8578 HImode,
8579 V2QImode, V1HImode,
8580 QImode,
8581 V1QImode
8584 struct constant
8586 struct constant *next;
8587 rtx value;
8588 rtx_code_label *label;
8591 struct constant_pool
8593 struct constant_pool *next;
8594 rtx_insn *first_insn;
8595 rtx_insn *pool_insn;
8596 bitmap insns;
8597 rtx_insn *emit_pool_after;
8599 struct constant *constants[NR_C_MODES];
8600 struct constant *execute;
8601 rtx_code_label *label;
8602 int size;
8605 /* Allocate new constant_pool structure. */
8607 static struct constant_pool *
8608 s390_alloc_pool (void)
8610 struct constant_pool *pool;
8611 int i;
8613 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8614 pool->next = NULL;
8615 for (i = 0; i < NR_C_MODES; i++)
8616 pool->constants[i] = NULL;
8618 pool->execute = NULL;
8619 pool->label = gen_label_rtx ();
8620 pool->first_insn = NULL;
8621 pool->pool_insn = NULL;
8622 pool->insns = BITMAP_ALLOC (NULL);
8623 pool->size = 0;
8624 pool->emit_pool_after = NULL;
8626 return pool;
8629 /* Create new constant pool covering instructions starting at INSN
8630 and chain it to the end of POOL_LIST. */
8632 static struct constant_pool *
8633 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8635 struct constant_pool *pool, **prev;
8637 pool = s390_alloc_pool ();
8638 pool->first_insn = insn;
8640 for (prev = pool_list; *prev; prev = &(*prev)->next)
8642 *prev = pool;
8644 return pool;
8647 /* End range of instructions covered by POOL at INSN and emit
8648 placeholder insn representing the pool. */
8650 static void
8651 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8653 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
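  /* The 8 extra bytes account for the alignment padding that s390_dump_pool
     may emit in front of the pool contents.  */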
8655 if (!insn)
8656 insn = get_last_insn ();
8658 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8659 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8662 /* Add INSN to the list of insns covered by POOL. */
8664 static void
8665 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8667 bitmap_set_bit (pool->insns, INSN_UID (insn));
8670 /* Return pool out of POOL_LIST that covers INSN. */
8672 static struct constant_pool *
8673 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8675 struct constant_pool *pool;
8677 for (pool = pool_list; pool; pool = pool->next)
8678 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8679 break;
8681 return pool;
8684 /* Add constant VAL of mode MODE to the constant pool POOL. */
8686 static void
8687 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8689 struct constant *c;
8690 int i;
8692 for (i = 0; i < NR_C_MODES; i++)
8693 if (constant_modes[i] == mode)
8694 break;
8695 gcc_assert (i != NR_C_MODES);
8697 for (c = pool->constants[i]; c != NULL; c = c->next)
8698 if (rtx_equal_p (val, c->value))
8699 break;
8701 if (c == NULL)
8703 c = (struct constant *) xmalloc (sizeof *c);
8704 c->value = val;
8705 c->label = gen_label_rtx ();
8706 c->next = pool->constants[i];
8707 pool->constants[i] = c;
8708 pool->size += GET_MODE_SIZE (mode);
8712 /* Return an rtx that represents the offset of X from the start of
8713 pool POOL. */
8715 static rtx
8716 s390_pool_offset (struct constant_pool *pool, rtx x)
8718 rtx label;
8720 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8721 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8722 UNSPEC_POOL_OFFSET);
8723 return gen_rtx_CONST (GET_MODE (x), x);
8726 /* Find constant VAL of mode MODE in the constant pool POOL.
8727 Return an RTX describing the distance from the start of
8728 the pool to the location of the new constant. */
8730 static rtx
8731 s390_find_constant (struct constant_pool *pool, rtx val,
8732 machine_mode mode)
8734 struct constant *c;
8735 int i;
8737 for (i = 0; i < NR_C_MODES; i++)
8738 if (constant_modes[i] == mode)
8739 break;
8740 gcc_assert (i != NR_C_MODES);
8742 for (c = pool->constants[i]; c != NULL; c = c->next)
8743 if (rtx_equal_p (val, c->value))
8744 break;
8746 gcc_assert (c);
8748 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8751 /* Check whether INSN is an execute. Return the label_ref to its
8752 execute target template if so, NULL_RTX otherwise. */
8754 static rtx
8755 s390_execute_label (rtx insn)
8757 if (INSN_P (insn)
8758 && GET_CODE (PATTERN (insn)) == PARALLEL
8759 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8760 && (XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE
8761 || XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE_JUMP))
8763 if (XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8764 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8765 else
8767 gcc_assert (JUMP_P (insn));
8768 /* For jump insns as execute target:
8769 - There is one operand less in the parallel (the
8770 modification register of the execute is always 0).
8771 - The execute target label is wrapped into an
8772 if_then_else in order to hide it from jump analysis. */
8773 return XEXP (XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 0), 0);
8777 return NULL_RTX;
8780 /* Add execute target for INSN to the constant pool POOL. */
8782 static void
8783 s390_add_execute (struct constant_pool *pool, rtx insn)
8785 struct constant *c;
8787 for (c = pool->execute; c != NULL; c = c->next)
8788 if (INSN_UID (insn) == INSN_UID (c->value))
8789 break;
8791 if (c == NULL)
8793 c = (struct constant *) xmalloc (sizeof *c);
8794 c->value = insn;
8795 c->label = gen_label_rtx ();
8796 c->next = pool->execute;
8797 pool->execute = c;
8798 pool->size += 6;
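      /* Reserve 6 bytes per execute target: it is a single instruction, and
	 6 bytes is the longest s390 instruction format.  */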
8802 /* Find execute target for INSN in the constant pool POOL.
8803 Return an RTX describing the distance from the start of
8804 the pool to the location of the execute target. */
8806 static rtx
8807 s390_find_execute (struct constant_pool *pool, rtx insn)
8809 struct constant *c;
8811 for (c = pool->execute; c != NULL; c = c->next)
8812 if (INSN_UID (insn) == INSN_UID (c->value))
8813 break;
8815 gcc_assert (c);
8817 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8820 /* For an execute INSN, extract the execute target template. */
8822 static rtx
8823 s390_execute_target (rtx insn)
8825 rtx pattern = PATTERN (insn);
8826 gcc_assert (s390_execute_label (insn));
8828 if (XVECLEN (pattern, 0) == 2)
8830 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8832 else
8834 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8835 int i;
8837 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8838 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8840 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8843 return pattern;
8846 /* Indicate that INSN cannot be duplicated. This is the case for
8847 execute insns that carry a unique label. */
8849 static bool
8850 s390_cannot_copy_insn_p (rtx_insn *insn)
8852 rtx label = s390_execute_label (insn);
8853 return label && label != const0_rtx;
8856 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8857 do not emit the pool base label. */
8859 static void
8860 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8862 struct constant *c;
8863 rtx_insn *insn = pool->pool_insn;
8864 int i;
8866 /* Switch to rodata section. */
8867 if (TARGET_CPU_ZARCH)
8869 insn = emit_insn_after (gen_pool_section_start (), insn);
8870 INSN_ADDRESSES_NEW (insn, -1);
8873 /* Ensure minimum pool alignment. */
8874 if (TARGET_CPU_ZARCH)
8875 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8876 else
8877 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8878 INSN_ADDRESSES_NEW (insn, -1);
8880 /* Emit pool base label. */
8881 if (!remote_label)
8883 insn = emit_label_after (pool->label, insn);
8884 INSN_ADDRESSES_NEW (insn, -1);
8887 /* Dump constants in descending alignment requirement order,
8888 ensuring proper alignment for every constant. */
8889 for (i = 0; i < NR_C_MODES; i++)
8890 for (c = pool->constants[i]; c; c = c->next)
8892 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8893 rtx value = copy_rtx (c->value);
8894 if (GET_CODE (value) == CONST
8895 && GET_CODE (XEXP (value, 0)) == UNSPEC
8896 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8897 && XVECLEN (XEXP (value, 0), 0) == 1)
8898 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8900 insn = emit_label_after (c->label, insn);
8901 INSN_ADDRESSES_NEW (insn, -1);
8903 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8904 gen_rtvec (1, value),
8905 UNSPECV_POOL_ENTRY);
8906 insn = emit_insn_after (value, insn);
8907 INSN_ADDRESSES_NEW (insn, -1);
8910 /* Ensure minimum alignment for instructions. */
8911 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8912 INSN_ADDRESSES_NEW (insn, -1);
8914 /* Output in-pool execute template insns. */
8915 for (c = pool->execute; c; c = c->next)
8917 insn = emit_label_after (c->label, insn);
8918 INSN_ADDRESSES_NEW (insn, -1);
8920 insn = emit_insn_after (s390_execute_target (c->value), insn);
8921 INSN_ADDRESSES_NEW (insn, -1);
8924 /* Switch back to previous section. */
8925 if (TARGET_CPU_ZARCH)
8927 insn = emit_insn_after (gen_pool_section_end (), insn);
8928 INSN_ADDRESSES_NEW (insn, -1);
8931 insn = emit_barrier_after (insn);
8932 INSN_ADDRESSES_NEW (insn, -1);
8934 /* Remove placeholder insn. */
8935 remove_insn (pool->pool_insn);
8938 /* Free all memory used by POOL. */
8940 static void
8941 s390_free_pool (struct constant_pool *pool)
8943 struct constant *c, *next;
8944 int i;
8946 for (i = 0; i < NR_C_MODES; i++)
8947 for (c = pool->constants[i]; c; c = next)
8949 next = c->next;
8950 free (c);
8953 for (c = pool->execute; c; c = next)
8955 next = c->next;
8956 free (c);
8959 BITMAP_FREE (pool->insns);
8960 free (pool);
8964 /* Collect main literal pool. Return NULL on overflow. */
8966 static struct constant_pool *
8967 s390_mainpool_start (void)
8969 struct constant_pool *pool;
8970 rtx_insn *insn;
8972 pool = s390_alloc_pool ();
8974 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8976 if (NONJUMP_INSN_P (insn)
8977 && GET_CODE (PATTERN (insn)) == SET
8978 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8979 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8981 /* There might be two main_pool instructions if base_reg
8982 is call-clobbered; one for shrink-wrapped code and one
8983 for the rest. We want to keep the first. */
8984 if (pool->pool_insn)
8986 insn = PREV_INSN (insn);
8987 delete_insn (NEXT_INSN (insn));
8988 continue;
8990 pool->pool_insn = insn;
8993 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8995 s390_add_execute (pool, insn);
8997 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8999 rtx pool_ref = NULL_RTX;
9000 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9001 if (pool_ref)
9003 rtx constant = get_pool_constant (pool_ref);
9004 machine_mode mode = get_pool_mode (pool_ref);
9005 s390_add_constant (pool, constant, mode);
9009 /* If hot/cold partitioning is enabled we have to make sure that
9010 the literal pool is emitted in the same section where the
9011 initialization of the literal pool base pointer takes place.
9012 emit_pool_after is only used in the non-overflow case on non
9013 Z cpus where we can emit the literal pool at the end of the
9014 function body within the text section. */
9015 if (NOTE_P (insn)
9016 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
9017 && !pool->emit_pool_after)
9018 pool->emit_pool_after = PREV_INSN (insn);
9021 gcc_assert (pool->pool_insn || pool->size == 0);
9023 if (pool->size >= 4096)
9025 /* We're going to chunkify the pool, so remove the main
9026 pool placeholder insn. */
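      /* (A pool of 4096 bytes or more could not be fully reached via the
	 12-bit displacement from the pool base register anyway.)  */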
9027 remove_insn (pool->pool_insn);
9029 s390_free_pool (pool);
9030 pool = NULL;
9033 /* If the function ends with the section where the literal pool
9034 should be emitted, set the marker to its end. */
9035 if (pool && !pool->emit_pool_after)
9036 pool->emit_pool_after = get_last_insn ();
9038 return pool;
9041 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9042 Modify the current function to output the pool constants as well as
9043 the pool register setup instruction. */
9045 static void
9046 s390_mainpool_finish (struct constant_pool *pool)
9048 rtx base_reg = cfun->machine->base_reg;
9050 /* If the pool is empty, we're done. */
9051 if (pool->size == 0)
9053 /* We don't actually need a base register after all. */
9054 cfun->machine->base_reg = NULL_RTX;
9056 if (pool->pool_insn)
9057 remove_insn (pool->pool_insn);
9058 s390_free_pool (pool);
9059 return;
9062 /* We need correct insn addresses. */
9063 shorten_branches (get_insns ());
9065 /* On zSeries, we use a LARL to load the pool register. The pool is
9066 located in the .rodata section, so we emit it after the function. */
9067 if (TARGET_CPU_ZARCH)
9069 rtx set = gen_main_base_64 (base_reg, pool->label);
9070 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
9071 INSN_ADDRESSES_NEW (insn, -1);
9072 remove_insn (pool->pool_insn);
9074 insn = get_last_insn ();
9075 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9076 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9078 s390_dump_pool (pool, 0);
9081 /* On S/390, if the total size of the function's code plus literal pool
9082 does not exceed 4096 bytes, we use BASR to set up a function base
9083 pointer, and emit the literal pool at the end of the function. */
9084 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
9085 + pool->size + 8 /* alignment slop */ < 4096)
9087 rtx set = gen_main_base_31_small (base_reg, pool->label);
9088 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
9089 INSN_ADDRESSES_NEW (insn, -1);
9090 remove_insn (pool->pool_insn);
9092 insn = emit_label_after (pool->label, insn);
9093 INSN_ADDRESSES_NEW (insn, -1);
9095 /* emit_pool_after will be set by s390_mainpool_start to the
9096 last insn of the section where the literal pool should be
9097 emitted. */
9098 insn = pool->emit_pool_after;
9100 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9101 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9103 s390_dump_pool (pool, 1);
9106 /* Otherwise, we emit an inline literal pool and use BASR to branch
9107 over it, setting up the pool register at the same time. */
9108 else
9110 rtx_code_label *pool_end = gen_label_rtx ();
9112 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
9113 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
9114 JUMP_LABEL (insn) = pool_end;
9115 INSN_ADDRESSES_NEW (insn, -1);
9116 remove_insn (pool->pool_insn);
9118 insn = emit_label_after (pool->label, insn);
9119 INSN_ADDRESSES_NEW (insn, -1);
9121 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9122 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9124 insn = emit_label_after (pool_end, pool->pool_insn);
9125 INSN_ADDRESSES_NEW (insn, -1);
9127 s390_dump_pool (pool, 1);
9131 /* Replace all literal pool references. */
9133 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
9135 if (INSN_P (insn))
9136 replace_ltrel_base (&PATTERN (insn));
9138 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9140 rtx addr, pool_ref = NULL_RTX;
9141 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9142 if (pool_ref)
9144 if (s390_execute_label (insn))
9145 addr = s390_find_execute (pool, insn);
9146 else
9147 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
9148 get_pool_mode (pool_ref));
9150 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9151 INSN_CODE (insn) = -1;
9157 /* Free the pool. */
9158 s390_free_pool (pool);
9161 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9162 We have decided we cannot use this pool, so revert all changes
9163 to the current function that were done by s390_mainpool_start. */
9164 static void
9165 s390_mainpool_cancel (struct constant_pool *pool)
9167 /* We didn't actually change the instruction stream, so simply
9168 free the pool memory. */
9169 s390_free_pool (pool);
9173 /* Chunkify the literal pool. */
9175 #define S390_POOL_CHUNK_MIN 0xc00
9176 #define S390_POOL_CHUNK_MAX 0xe00
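/* Both limits stay well below the 4 KB reach of a 12-bit displacement from
   the chunk base register, leaving headroom for alignment padding and for
   the base-register reload insns inserted later.  */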
9178 static struct constant_pool *
9179 s390_chunkify_start (void)
9181 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
9182 int extra_size = 0;
9183 bitmap far_labels;
9184 rtx pending_ltrel = NULL_RTX;
9185 rtx_insn *insn;
9187 rtx (*gen_reload_base) (rtx, rtx) =
9188 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
9191 /* We need correct insn addresses. */
9193 shorten_branches (get_insns ());
9195 /* Scan all insns and move literals to pool chunks. */
9197 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9199 bool section_switch_p = false;
9201 /* Check for pending LTREL_BASE. */
9202 if (INSN_P (insn))
9204 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
9205 if (ltrel_base)
9207 gcc_assert (ltrel_base == pending_ltrel);
9208 pending_ltrel = NULL_RTX;
9212 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
9214 if (!curr_pool)
9215 curr_pool = s390_start_pool (&pool_list, insn);
9217 s390_add_execute (curr_pool, insn);
9218 s390_add_pool_insn (curr_pool, insn);
9220 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9222 rtx pool_ref = NULL_RTX;
9223 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9224 if (pool_ref)
9226 rtx constant = get_pool_constant (pool_ref);
9227 machine_mode mode = get_pool_mode (pool_ref);
9229 if (!curr_pool)
9230 curr_pool = s390_start_pool (&pool_list, insn);
9232 s390_add_constant (curr_pool, constant, mode);
9233 s390_add_pool_insn (curr_pool, insn);
9235 /* Don't split the pool chunk between a LTREL_OFFSET load
9236 and the corresponding LTREL_BASE. */
9237 if (GET_CODE (constant) == CONST
9238 && GET_CODE (XEXP (constant, 0)) == UNSPEC
9239 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
9241 gcc_assert (!pending_ltrel);
9242 pending_ltrel = pool_ref;
9247 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
9249 if (curr_pool)
9250 s390_add_pool_insn (curr_pool, insn);
9251 /* An LTREL_BASE must follow within the same basic block. */
9252 gcc_assert (!pending_ltrel);
9255 if (NOTE_P (insn))
9256 switch (NOTE_KIND (insn))
9258 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
9259 section_switch_p = true;
9260 break;
9261 case NOTE_INSN_VAR_LOCATION:
9262 continue;
9263 default:
9264 break;
9267 if (!curr_pool
9268 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
9269 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
9270 continue;
9272 if (TARGET_CPU_ZARCH)
9274 if (curr_pool->size < S390_POOL_CHUNK_MAX)
9275 continue;
9277 s390_end_pool (curr_pool, NULL);
9278 curr_pool = NULL;
9280 else
9282 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
9283 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
9284 + extra_size;
9286 /* We will later have to insert base register reload insns.
9287 Those will have an effect on code size, which we need to
9288 consider here. This calculation makes rather pessimistic
9289 worst-case assumptions. */
9290 if (LABEL_P (insn))
9291 extra_size += 6;
9293 if (chunk_size < S390_POOL_CHUNK_MIN
9294 && curr_pool->size < S390_POOL_CHUNK_MIN
9295 && !section_switch_p)
9296 continue;
9298 /* Pool chunks can only be inserted after BARRIERs ... */
9299 if (BARRIER_P (insn))
9301 s390_end_pool (curr_pool, insn);
9302 curr_pool = NULL;
9303 extra_size = 0;
9306 /* ... so if we don't find one in time, create one. */
9307 else if (chunk_size > S390_POOL_CHUNK_MAX
9308 || curr_pool->size > S390_POOL_CHUNK_MAX
9309 || section_switch_p)
9311 rtx_insn *label, *jump, *barrier, *next, *prev;
9313 if (!section_switch_p)
9315 /* We can insert the barrier only after a 'real' insn. */
9316 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
9317 continue;
9318 if (get_attr_length (insn) == 0)
9319 continue;
9320 /* Don't separate LTREL_BASE from the corresponding
9321 LTREL_OFFSET load. */
9322 if (pending_ltrel)
9323 continue;
9324 next = insn;
9327 insn = next;
9328 next = NEXT_INSN (insn);
9330 while (next
9331 && NOTE_P (next)
9332 && NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION);
9334 else
9336 gcc_assert (!pending_ltrel);
9338 /* The old pool has to end before the section switch
9339 note in order to make it part of the current
9340 section. */
9341 insn = PREV_INSN (insn);
9344 label = gen_label_rtx ();
9345 prev = insn;
9346 if (prev && NOTE_P (prev))
9347 prev = prev_nonnote_insn (prev);
9348 if (prev)
9349 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9350 INSN_LOCATION (prev));
9351 else
9352 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9353 barrier = emit_barrier_after (jump);
9354 insn = emit_label_after (label, barrier);
9355 JUMP_LABEL (jump) = label;
9356 LABEL_NUSES (label) = 1;
9358 INSN_ADDRESSES_NEW (jump, -1);
9359 INSN_ADDRESSES_NEW (barrier, -1);
9360 INSN_ADDRESSES_NEW (insn, -1);
9362 s390_end_pool (curr_pool, barrier);
9363 curr_pool = NULL;
9364 extra_size = 0;
9369 if (curr_pool)
9370 s390_end_pool (curr_pool, NULL);
9371 gcc_assert (!pending_ltrel);
9373 /* Find all labels that are branched into
9374 from an insn belonging to a different chunk. */
9376 far_labels = BITMAP_ALLOC (NULL);
9378 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9380 rtx_jump_table_data *table;
9382 /* Labels marked with LABEL_PRESERVE_P can be target
9383 of non-local jumps, so we have to mark them.
9384 The same holds for named labels.
9386 Don't do that, however, if it is the label before
9387 a jump table. */
9389 if (LABEL_P (insn)
9390 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9392 rtx_insn *vec_insn = NEXT_INSN (insn);
9393 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9394 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9396 /* Check potential targets in a table jump (casesi_jump). */
9397 else if (tablejump_p (insn, NULL, &table))
9399 rtx vec_pat = PATTERN (table);
9400 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9402 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9404 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9406 if (s390_find_pool (pool_list, label)
9407 != s390_find_pool (pool_list, insn))
9408 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9411 /* If we have a direct jump (conditional or unconditional),
9412 check all potential targets. */
9413 else if (JUMP_P (insn))
9415 rtx pat = PATTERN (insn);
9417 if (GET_CODE (pat) == PARALLEL)
9418 pat = XVECEXP (pat, 0, 0);
9420 if (GET_CODE (pat) == SET)
9422 rtx label = JUMP_LABEL (insn);
9423 if (label && !ANY_RETURN_P (label))
9425 if (s390_find_pool (pool_list, label)
9426 != s390_find_pool (pool_list, insn))
9427 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9433 /* Insert base register reload insns before every pool. */
9435 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9437 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9438 curr_pool->label);
9439 rtx_insn *insn = curr_pool->first_insn;
9440 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9443 /* Insert base register reload insns at every far label. */
9445 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9446 if (LABEL_P (insn)
9447 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9449 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9450 if (pool)
9452 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9453 pool->label);
9454 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9459 BITMAP_FREE (far_labels);
9462 /* Recompute insn addresses. */
9464 init_insn_lengths ();
9465 shorten_branches (get_insns ());
9467 return pool_list;
9470 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9471 After we have decided to use this list, finish implementing
9472 all changes to the current function as required. */
9474 static void
9475 s390_chunkify_finish (struct constant_pool *pool_list)
9477 struct constant_pool *curr_pool = NULL;
9478 rtx_insn *insn;
9481 /* Replace all literal pool references. */
9483 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9485 if (INSN_P (insn))
9486 replace_ltrel_base (&PATTERN (insn));
9488 curr_pool = s390_find_pool (pool_list, insn);
9489 if (!curr_pool)
9490 continue;
9492 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9494 rtx addr, pool_ref = NULL_RTX;
9495 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9496 if (pool_ref)
9498 if (s390_execute_label (insn))
9499 addr = s390_find_execute (curr_pool, insn);
9500 else
9501 addr = s390_find_constant (curr_pool,
9502 get_pool_constant (pool_ref),
9503 get_pool_mode (pool_ref));
9505 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9506 INSN_CODE (insn) = -1;
9511 /* Dump out all literal pools. */
9513 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9514 s390_dump_pool (curr_pool, 0);
9516 /* Free pool list. */
9518 while (pool_list)
9520 struct constant_pool *next = pool_list->next;
9521 s390_free_pool (pool_list);
9522 pool_list = next;
9526 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9527 We have decided we cannot use this list, so revert all changes
9528 to the current function that were done by s390_chunkify_start. */
9530 static void
9531 s390_chunkify_cancel (struct constant_pool *pool_list)
9533 struct constant_pool *curr_pool = NULL;
9534 rtx_insn *insn;
9536 /* Remove all pool placeholder insns. */
9538 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9540 /* Did we insert an extra barrier? Remove it. */
9541 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9542 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9543 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9545 if (jump && JUMP_P (jump)
9546 && barrier && BARRIER_P (barrier)
9547 && label && LABEL_P (label)
9548 && GET_CODE (PATTERN (jump)) == SET
9549 && SET_DEST (PATTERN (jump)) == pc_rtx
9550 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9551 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9553 remove_insn (jump);
9554 remove_insn (barrier);
9555 remove_insn (label);
9558 remove_insn (curr_pool->pool_insn);
9561 /* Remove all base register reload insns. */
9563 for (insn = get_insns (); insn; )
9565 rtx_insn *next_insn = NEXT_INSN (insn);
9567 if (NONJUMP_INSN_P (insn)
9568 && GET_CODE (PATTERN (insn)) == SET
9569 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9570 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9571 remove_insn (insn);
9573 insn = next_insn;
9576 /* Free pool list. */
9578 while (pool_list)
9580 struct constant_pool *next = pool_list->next;
9581 s390_free_pool (pool_list);
9582 pool_list = next;
9586 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9588 void
9589 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9591 switch (GET_MODE_CLASS (mode))
9593 case MODE_FLOAT:
9594 case MODE_DECIMAL_FLOAT:
9595 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9597 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp),
9598 as_a <scalar_float_mode> (mode), align);
9599 break;
9601 case MODE_INT:
9602 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9603 mark_symbol_refs_as_used (exp);
9604 break;
9606 case MODE_VECTOR_INT:
9607 case MODE_VECTOR_FLOAT:
9609 int i;
9610 machine_mode inner_mode;
9611 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9613 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9614 for (i = 0; i < XVECLEN (exp, 0); i++)
9615 s390_output_pool_entry (XVECEXP (exp, 0, i),
9616 inner_mode,
9617 i == 0
9618 ? align
9619 : GET_MODE_BITSIZE (inner_mode));
9621 break;
9623 default:
9624 gcc_unreachable ();
9629 /* Return an RTL expression representing the value of the return address
9630 for the frame COUNT steps up from the current frame. FRAME is the
9631 frame pointer of that frame. */
9634 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9636 int offset;
9637 rtx addr;
9639 /* Without backchain, we fail for all but the current frame. */
9641 if (!TARGET_BACKCHAIN && count > 0)
9642 return NULL_RTX;
9644 /* For the current frame, we need to make sure the initial
9645 value of RETURN_REGNUM is actually saved. */
9647 if (count == 0)
9649 /* On non-z architectures branch splitting could overwrite r14. */
9650 if (TARGET_CPU_ZARCH)
9651 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9652 else
9654 cfun_frame_layout.save_return_addr_p = true;
9655 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9659 if (TARGET_PACKED_STACK)
9660 offset = -2 * UNITS_PER_LONG;
9661 else
9662 offset = RETURN_REGNUM * UNITS_PER_LONG;
9664 addr = plus_constant (Pmode, frame, offset);
9665 addr = memory_address (Pmode, addr);
9666 return gen_rtx_MEM (Pmode, addr);
9669 /* Return an RTL expression representing the back chain stored in
9670 the current stack frame. */
9673 s390_back_chain_rtx (void)
9675 rtx chain;
9677 gcc_assert (TARGET_BACKCHAIN);
9679 if (TARGET_PACKED_STACK)
9680 chain = plus_constant (Pmode, stack_pointer_rtx,
9681 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9682 else
9683 chain = stack_pointer_rtx;
9685 chain = gen_rtx_MEM (Pmode, chain);
9686 return chain;
9689 /* Find the first call-clobbered register unused in a function.
9690 This could be used as a base register in a leaf function
9691 or for holding the return address before the epilogue. */
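/* (GPRs 0 through 5 are the call-clobbered GPRs considered here, hence the
   loop bound of 6 below.)  */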
9693 static int
9694 find_unused_clobbered_reg (void)
9696 int i;
9697 for (i = 0; i < 6; i++)
9698 if (!df_regs_ever_live_p (i))
9699 return i;
9700 return 0;
9704 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9705 clobbered hard regs in SETREG. */
9707 static void
9708 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9710 char *regs_ever_clobbered = (char *)data;
9711 unsigned int i, regno;
9712 machine_mode mode = GET_MODE (setreg);
9714 if (GET_CODE (setreg) == SUBREG)
9716 rtx inner = SUBREG_REG (setreg);
9717 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9718 return;
9719 regno = subreg_regno (setreg);
9721 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9722 regno = REGNO (setreg);
9723 else
9724 return;
9726 for (i = regno;
9727 i < end_hard_regno (mode, regno);
9728 i++)
9729 regs_ever_clobbered[i] = 1;
9732 /* Walks through all basic blocks of the current function looking
9733 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9734 of the passed char array REGS_EVER_CLOBBERED are set to one for
9735 each of those regs. */
9737 static void
9738 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9740 basic_block cur_bb;
9741 rtx_insn *cur_insn;
9742 unsigned int i;
9744 memset (regs_ever_clobbered, 0, 32);
9746 /* For non-leaf functions we have to consider all call clobbered regs to be
9747 clobbered. */
9748 if (!crtl->is_leaf)
9750 for (i = 0; i < 32; i++)
9751 regs_ever_clobbered[i] = call_really_used_regs[i];
9754 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9755 this work is done by liveness analysis (mark_regs_live_at_end).
9756 Special care is needed for functions containing landing pads. Landing pads
9757 may use the eh registers, but the code which sets these registers is not
9758 contained in that function. Hence s390_regs_ever_clobbered is not able to
9759 deal with this automatically. */
9760 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9761 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9762 if (crtl->calls_eh_return
9763 || (cfun->machine->has_landing_pad_p
9764 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9765 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9767 /* For nonlocal gotos all call-saved registers have to be saved.
9768 This flag is also set for the unwinding code in libgcc.
9769 See expand_builtin_unwind_init. For regs_ever_live this is done by
9770 reload. */
9771 if (crtl->saves_all_registers)
9772 for (i = 0; i < 32; i++)
9773 if (!call_really_used_regs[i])
9774 regs_ever_clobbered[i] = 1;
9776 FOR_EACH_BB_FN (cur_bb, cfun)
9778 FOR_BB_INSNS (cur_bb, cur_insn)
9780 rtx pat;
9782 if (!INSN_P (cur_insn))
9783 continue;
9785 pat = PATTERN (cur_insn);
9787 /* Ignore GPR restore insns. */
9788 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9790 if (GET_CODE (pat) == SET
9791 && GENERAL_REG_P (SET_DEST (pat)))
9793 /* lgdr */
9794 if (GET_MODE (SET_SRC (pat)) == DImode
9795 && FP_REG_P (SET_SRC (pat)))
9796 continue;
9798 /* l / lg */
9799 if (GET_CODE (SET_SRC (pat)) == MEM)
9800 continue;
9803 /* lm / lmg */
9804 if (GET_CODE (pat) == PARALLEL
9805 && load_multiple_operation (pat, VOIDmode))
9806 continue;
9809 note_stores (pat,
9810 s390_reg_clobbered_rtx,
9811 regs_ever_clobbered);
9816 /* Determine the frame area which actually has to be accessed
9817 in the function epilogue. The values are stored at the
9818 given pointers AREA_BOTTOM (address of the lowest used stack
9819 address) and AREA_TOP (address of the first item which does
9820 not belong to the stack frame). */
9822 static void
9823 s390_frame_area (int *area_bottom, int *area_top)
9825 int b, t;
9827 b = INT_MAX;
9828 t = INT_MIN;
9830 if (cfun_frame_layout.first_restore_gpr != -1)
9832 b = (cfun_frame_layout.gprs_offset
9833 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9834 t = b + (cfun_frame_layout.last_restore_gpr
9835 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9838 if (TARGET_64BIT && cfun_save_high_fprs_p)
9840 b = MIN (b, cfun_frame_layout.f8_offset);
9841 t = MAX (t, (cfun_frame_layout.f8_offset
9842 + cfun_frame_layout.high_fprs * 8));
9845 if (!TARGET_64BIT)
9847 if (cfun_fpr_save_p (FPR4_REGNUM))
9849 b = MIN (b, cfun_frame_layout.f4_offset);
9850 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9852 if (cfun_fpr_save_p (FPR6_REGNUM))
9854 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9855 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9858 *area_bottom = b;
9859 *area_top = t;
9861 /* Update gpr_save_slots in the frame layout trying to make use of
9862 FPRs as GPR save slots.
9863 This is a helper routine of s390_register_info. */
9865 static void
9866 s390_register_info_gprtofpr ()
9868 int save_reg_slot = FPR0_REGNUM;
9869 int i, j;
9871 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9872 return;
9874 /* builtin_eh_return needs to be able to modify the return address
9875 on the stack. It could also adjust the FPR save slot instead but
9876 is it worth the trouble?! */
9877 if (crtl->calls_eh_return)
9878 return;
9880 for (i = 15; i >= 6; i--)
9882 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9883 continue;
9885 /* Advance to the next FP register which can be used as a
9886 GPR save slot. */
9887 while ((!call_really_used_regs[save_reg_slot]
9888 || df_regs_ever_live_p (save_reg_slot)
9889 || cfun_fpr_save_p (save_reg_slot))
9890 && FP_REGNO_P (save_reg_slot))
9891 save_reg_slot++;
9892 if (!FP_REGNO_P (save_reg_slot))
9894 /* We only want to use ldgr/lgdr if we can get rid of
9895 stm/lm entirely. So undo the gpr slot allocation in
9896 case we ran out of FPR save slots. */
9897 for (j = 6; j <= 15; j++)
9898 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9899 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9900 break;
9902 cfun_gpr_save_slot (i) = save_reg_slot++;
9906 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9907 stdarg.
9908 This is a helper routine for s390_register_info. */
9910 static void
9911 s390_register_info_stdarg_fpr ()
9913 int i;
9914 int min_fpr;
9915 int max_fpr;
9917 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9918 f0-f4 for 64 bit. */
9919 if (!cfun->stdarg
9920 || !TARGET_HARD_FLOAT
9921 || !cfun->va_list_fpr_size
9922 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9923 return;
9925 min_fpr = crtl->args.info.fprs;
9926 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9927 if (max_fpr >= FP_ARG_NUM_REG)
9928 max_fpr = FP_ARG_NUM_REG - 1;
9930 /* FPR argument regs start at f0. */
9931 min_fpr += FPR0_REGNUM;
9932 max_fpr += FPR0_REGNUM;
9934 for (i = min_fpr; i <= max_fpr; i++)
9935 cfun_set_fpr_save (i);
9938 /* Reserve the GPR save slots for GPRs which need to be saved due to
9939 stdarg.
9940 This is a helper routine for s390_register_info. */
9942 static void
9943 s390_register_info_stdarg_gpr ()
9945 int i;
9946 int min_gpr;
9947 int max_gpr;
9949 if (!cfun->stdarg
9950 || !cfun->va_list_gpr_size
9951 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9952 return;
9954 min_gpr = crtl->args.info.gprs;
9955 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9956 if (max_gpr >= GP_ARG_NUM_REG)
9957 max_gpr = GP_ARG_NUM_REG - 1;
9959 /* GPR argument regs start at r2. */
9960 min_gpr += GPR2_REGNUM;
9961 max_gpr += GPR2_REGNUM;
9963 /* If r6 was supposed to be saved into an FPR and now needs to go to
9964 the stack for vararg we have to adjust the restore range to make
9965 sure that the restore is done from stack as well. */
9966 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9967 && min_gpr <= GPR6_REGNUM
9968 && max_gpr >= GPR6_REGNUM)
9970 if (cfun_frame_layout.first_restore_gpr == -1
9971 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9972 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9973 if (cfun_frame_layout.last_restore_gpr == -1
9974 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9975 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9978 if (cfun_frame_layout.first_save_gpr == -1
9979 || cfun_frame_layout.first_save_gpr > min_gpr)
9980 cfun_frame_layout.first_save_gpr = min_gpr;
9982 if (cfun_frame_layout.last_save_gpr == -1
9983 || cfun_frame_layout.last_save_gpr < max_gpr)
9984 cfun_frame_layout.last_save_gpr = max_gpr;
9986 for (i = min_gpr; i <= max_gpr; i++)
9987 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9990 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9991 prologue and epilogue. */
9993 static void
9994 s390_register_info_set_ranges ()
9996 int i, j;
9998 /* Find the first and the last save slot supposed to use the stack
9999 to set the restore range.
10000 Vararg regs might be marked as save to stack but only the
10001 call-saved regs really need restoring (i.e. r6). This code
10002 assumes that the vararg regs have not yet been recorded in
10003 cfun_gpr_save_slot. */
10004 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
10005 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
10006 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
10007 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
10008 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
10009 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
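/* E.g. if only r7 and r14 ended up with a stack save slot, the range
   becomes 7..14, so the stm/stmg in the prologue also stores r8..r13:
   store/load multiple instructions only handle contiguous register
   ranges.  */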
10012 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
10013 for registers which need to be saved in function prologue.
10014 This function can be used until the insns emitted for save/restore
10015 of the regs are visible in the RTL stream. */
10017 static void
10018 s390_register_info ()
10020 int i;
10021 char clobbered_regs[32];
10023 gcc_assert (!epilogue_completed);
10025 if (reload_completed)
10026 /* After reload we rely on our own routine to determine which
10027 registers need saving. */
10028 s390_regs_ever_clobbered (clobbered_regs);
10029 else
10030 /* During reload we use regs_ever_live as a base since reload
10031 does changes in there which we otherwise would not be aware
10032 of. */
10033 for (i = 0; i < 32; i++)
10034 clobbered_regs[i] = df_regs_ever_live_p (i);
10036 for (i = 0; i < 32; i++)
10037 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10039 /* Mark the call-saved FPRs which need to be saved.
10040 This needs to be done before checking the special GPRs since the
10041 stack pointer usage depends on whether high FPRs have to be saved
10042 or not. */
10043 cfun_frame_layout.fpr_bitmap = 0;
10044 cfun_frame_layout.high_fprs = 0;
10045 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
10046 if (clobbered_regs[i] && !call_really_used_regs[i])
10048 cfun_set_fpr_save (i);
10049 if (i >= FPR8_REGNUM)
10050 cfun_frame_layout.high_fprs++;
10053 /* Register 12 is used for GOT address, but also as temp in prologue
10054 for split-stack stdarg functions (unless r14 is available). */
10055 clobbered_regs[12]
10056 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10057 || (flag_split_stack && cfun->stdarg
10058 && (crtl->is_leaf || TARGET_TPF_PROFILING
10059 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
10061 clobbered_regs[BASE_REGNUM]
10062 |= (cfun->machine->base_reg
10063 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
10065 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
10066 |= !!frame_pointer_needed;
10068 /* On pre-z900 machines this might not be decided until machine
10069 dependent reorg.
10070 save_return_addr_p will only be set on non-zarch machines so
10071 there is no risk that r14 goes into an FPR instead of a stack
10072 slot. */
10073 clobbered_regs[RETURN_REGNUM]
10074 |= (!crtl->is_leaf
10075 || TARGET_TPF_PROFILING
10076 || cfun->machine->split_branches_pending_p
10077 || cfun_frame_layout.save_return_addr_p
10078 || crtl->calls_eh_return);
10080 clobbered_regs[STACK_POINTER_REGNUM]
10081 |= (!crtl->is_leaf
10082 || TARGET_TPF_PROFILING
10083 || cfun_save_high_fprs_p
10084 || get_frame_size () > 0
10085 || (reload_completed && cfun_frame_layout.frame_size > 0)
10086 || cfun->calls_alloca);
10088 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
10090 for (i = 6; i < 16; i++)
10091 if (clobbered_regs[i])
10092 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
10094 s390_register_info_stdarg_fpr ();
10095 s390_register_info_gprtofpr ();
10096 s390_register_info_set_ranges ();
10097 /* stdarg functions might need to save GPRs 2 to 6. This might
10098 override the GPR->FPR save decision made by
10099 s390_register_info_gprtofpr for r6 since vararg regs must go to
10100 the stack. */
10101 s390_register_info_stdarg_gpr ();
10104 /* This function is called by s390_optimize_prologue in order to get
10105 rid of unnecessary GPR save/restore instructions. The register info
10106 for the GPRs is re-computed and the ranges are re-calculated. */
10108 static void
10109 s390_optimize_register_info ()
10111 char clobbered_regs[32];
10112 int i;
10114 gcc_assert (epilogue_completed);
10115 gcc_assert (!cfun->machine->split_branches_pending_p);
10117 s390_regs_ever_clobbered (clobbered_regs);
10119 for (i = 0; i < 32; i++)
10120 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10122 /* There is still special treatment needed for cases invisible to
10123 s390_regs_ever_clobbered. */
10124 clobbered_regs[RETURN_REGNUM]
10125 |= (TARGET_TPF_PROFILING
10126 /* When expanding builtin_return_addr in ESA mode we do not
10127 know whether r14 will later be needed as scratch reg when
10128 doing branch splitting. So the builtin always accesses the
10129 r14 save slot and we need to stick to the save/restore
10130 decision for r14 even if it turns out that it didn't get
10131 clobbered. */
10132 || cfun_frame_layout.save_return_addr_p
10133 || crtl->calls_eh_return);
10135 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
10137 for (i = 6; i < 16; i++)
10138 if (!clobbered_regs[i])
10139 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
10141 s390_register_info_set_ranges ();
10142 s390_register_info_stdarg_gpr ();
10145 /* Fill cfun->machine with info about frame of current function. */
10147 static void
10148 s390_frame_info (void)
10150 HOST_WIDE_INT lowest_offset;
10152 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
10153 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
10155 /* The va_arg builtin uses a constant distance of 16 *
10156 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
10157 pointer. So even if we are going to save the stack pointer in an
10158 FPR we need the stack space in order to keep the offsets
10159 correct. */
10160 if (cfun->stdarg && cfun_save_arg_fprs_p)
10162 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10164 if (cfun_frame_layout.first_save_gpr_slot == -1)
10165 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
10168 cfun_frame_layout.frame_size = get_frame_size ();
10169 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
10170 fatal_error (input_location,
10171 "total size of local variables exceeds architecture limit");
10173 if (!TARGET_PACKED_STACK)
10175 /* Fixed stack layout. */
10176 cfun_frame_layout.backchain_offset = 0;
10177 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
10178 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
10179 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
10180 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
10181 * UNITS_PER_LONG);
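/* With this fixed layout the save slot of GPR i ends up at offset
   i * UNITS_PER_LONG from the incoming stack pointer; on 64 bit that
   is r6 at 48 up to r15 at 120, with f0/f2 at 128/136 and f4/f6 at
   144/152, i.e. within the 160 byte register save area the 64-bit ABI
   reserves at the incoming stack pointer.  */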
10183 else if (TARGET_BACKCHAIN)
10185 /* Kernel stack layout - packed stack, backchain, no float */
10186 gcc_assert (TARGET_SOFT_FLOAT);
10187 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
10188 - UNITS_PER_LONG);
10190 /* The distance between the backchain and the return address
10191 save slot must not change. So we always need a slot for the
10192 stack pointer which resides in between. */
10193 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10195 cfun_frame_layout.gprs_offset
10196 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
10198 /* FPRs will not be saved. Nevertheless pick sane values to
10199 keep area calculations valid. */
10200 cfun_frame_layout.f0_offset =
10201 cfun_frame_layout.f4_offset =
10202 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
10204 else
10206 int num_fprs;
10208 /* Packed stack layout without backchain. */
10210 /* With stdarg FPRs need their dedicated slots. */
10211 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
10212 : (cfun_fpr_save_p (FPR4_REGNUM) +
10213 cfun_fpr_save_p (FPR6_REGNUM)));
10214 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
10216 num_fprs = (cfun->stdarg ? 2
10217 : (cfun_fpr_save_p (FPR0_REGNUM)
10218 + cfun_fpr_save_p (FPR2_REGNUM)));
10219 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
10221 cfun_frame_layout.gprs_offset
10222 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
10224 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
10225 - cfun_frame_layout.high_fprs * 8);
10228 if (cfun_save_high_fprs_p)
10229 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
10231 if (!crtl->is_leaf)
10232 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
10234 /* Except in the cases handled by the early return below we have to
10235 allocate a STACK_POINTER_OFFSET sized area at the bottom of the
10236 stack. This is required also for leaf functions. When GCC generates
10237 a local stack reference it always adds STACK_POINTER_OFFSET to it. */
10238 if (crtl->is_leaf
10239 && !TARGET_TPF_PROFILING
10240 && cfun_frame_layout.frame_size == 0
10241 && !cfun->calls_alloca)
10242 return;
10244 /* Calculate the number of bytes we have used in our own register
10245 save area. With the packed stack layout we can re-use the
10246 remaining bytes for normal stack elements. */
10248 if (TARGET_PACKED_STACK)
10249 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
10250 cfun_frame_layout.f4_offset),
10251 cfun_frame_layout.gprs_offset);
10252 else
10253 lowest_offset = 0;
10255 if (TARGET_BACKCHAIN)
10256 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
10258 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
10260 /* If, under 31 bit, an odd number of GPRs has to be saved, we have
10261 to adjust the frame size to maintain the 8 byte alignment of stack
10262 frames. */
10263 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
10264 STACK_BOUNDARY / BITS_PER_UNIT - 1)
10265 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
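/* E.g. with STACK_BOUNDARY / BITS_PER_UNIT == 8 a frame size of 92 is
   rounded up to 96: (92 + 7) & ~7.  */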
10268 /* Generate frame layout. Fills in register and frame data for the current
10269 function in cfun->machine. This routine can be called multiple times;
10270 it will re-do the complete frame layout every time. */
10272 static void
10273 s390_init_frame_layout (void)
10275 HOST_WIDE_INT frame_size;
10276 int base_used;
10278 /* After LRA the frame layout is supposed to be read-only and should
10279 not be re-computed. */
10280 if (reload_completed)
10281 return;
10283 /* On S/390 machines, we may need to perform branch splitting, which
10284 will require both the base and the return address register. We have no
10285 choice but to assume we're going to need them until right at the
10286 end of the machine dependent reorg phase. */
10287 if (!TARGET_CPU_ZARCH)
10288 cfun->machine->split_branches_pending_p = true;
10292 frame_size = cfun_frame_layout.frame_size;
10294 /* Try to predict whether we'll need the base register. */
10295 base_used = cfun->machine->split_branches_pending_p
10296 || crtl->uses_const_pool
10297 || (!DISP_IN_RANGE (frame_size)
10298 && !CONST_OK_FOR_K (frame_size));
10300 /* Decide which register to use as literal pool base. In small
10301 leaf functions, try to use an unused call-clobbered register
10302 as base register to avoid save/restore overhead. */
10303 if (!base_used)
10304 cfun->machine->base_reg = NULL_RTX;
10305 else
10307 int br = 0;
10309 if (crtl->is_leaf)
10310 /* Prefer r5 (most likely to be free). */
10311 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
10313 cfun->machine->base_reg =
10314 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
10317 s390_register_info ();
10318 s390_frame_info ();
10320 while (frame_size != cfun_frame_layout.frame_size);
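/* The loop above iterates until the frame size no longer changes:
   whether the literal pool base register is needed depends on the
   frame size (via DISP_IN_RANGE / CONST_OK_FOR_K), and reserving that
   register can in turn change the register info and hence the layout
   computed by s390_frame_info.  */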
10323 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
10324 the TX is nonescaping. A transaction is considered escaping if
10325 there is at least one path from tbegin returning CC0 to the
10326 function exit block without a tend.
10328 The check so far has some limitations:
10329 - only single tbegin/tend BBs are supported
10330 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10331 - when CC is copied to a GPR and the CC0 check is done with the GPR
10332 this is not supported
10335 static void
10336 s390_optimize_nonescaping_tx (void)
10338 const unsigned int CC0 = 1 << 3;
10339 basic_block tbegin_bb = NULL;
10340 basic_block tend_bb = NULL;
10341 basic_block bb;
10342 rtx_insn *insn;
10343 bool result = true;
10344 int bb_index;
10345 rtx_insn *tbegin_insn = NULL;
10347 if (!cfun->machine->tbegin_p)
10348 return;
10350 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
10352 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
10354 if (!bb)
10355 continue;
10357 FOR_BB_INSNS (bb, insn)
10359 rtx ite, cc, pat, target;
10360 unsigned HOST_WIDE_INT mask;
10362 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10363 continue;
10365 pat = PATTERN (insn);
10367 if (GET_CODE (pat) == PARALLEL)
10368 pat = XVECEXP (pat, 0, 0);
10370 if (GET_CODE (pat) != SET
10371 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10372 continue;
10374 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10376 rtx_insn *tmp;
10378 tbegin_insn = insn;
10380 /* Just return if the tbegin doesn't have clobbers. */
10381 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10382 return;
10384 if (tbegin_bb != NULL)
10385 return;
10387 /* Find the next conditional jump. */
10388 for (tmp = NEXT_INSN (insn);
10389 tmp != NULL_RTX;
10390 tmp = NEXT_INSN (tmp))
10392 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10393 return;
10394 if (!JUMP_P (tmp))
10395 continue;
10397 ite = SET_SRC (PATTERN (tmp));
10398 if (GET_CODE (ite) != IF_THEN_ELSE)
10399 continue;
10401 cc = XEXP (XEXP (ite, 0), 0);
10402 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10403 || GET_MODE (cc) != CCRAWmode
10404 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10405 return;
10407 if (bb->succs->length () != 2)
10408 return;
10410 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10411 if (GET_CODE (XEXP (ite, 0)) == NE)
10412 mask ^= 0xf;
10414 if (mask == CC0)
10415 target = XEXP (ite, 1);
10416 else if (mask == (CC0 ^ 0xf))
10417 target = XEXP (ite, 2);
10418 else
10419 return;
10422 edge_iterator ei;
10423 edge e1, e2;
10425 ei = ei_start (bb->succs);
10426 e1 = ei_safe_edge (ei);
10427 ei_next (&ei);
10428 e2 = ei_safe_edge (ei);
10430 if (e2->flags & EDGE_FALLTHRU)
10432 e2 = e1;
10433 e1 = ei_safe_edge (ei);
10436 if (!(e1->flags & EDGE_FALLTHRU))
10437 return;
10439 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10441 if (tmp == BB_END (bb))
10442 break;
10446 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10448 if (tend_bb != NULL)
10449 return;
10450 tend_bb = bb;
10455 /* Either we successfully remove the FPR clobbers here or we are not
10456 able to do anything for this TX. Neither case qualifies for
10457 another look. */
10458 cfun->machine->tbegin_p = false;
10460 if (tbegin_bb == NULL || tend_bb == NULL)
10461 return;
10463 calculate_dominance_info (CDI_POST_DOMINATORS);
10464 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10465 free_dominance_info (CDI_POST_DOMINATORS);
10467 if (!result)
10468 return;
10470 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10471 gen_rtvec (2,
10472 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10473 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10474 INSN_CODE (tbegin_insn) = -1;
10475 df_insn_rescan (tbegin_insn);
10477 return;
10480 /* Implement TARGET_HARD_REGNO_NREGS. Because all registers in a class
10481 have the same size, this is equivalent to CLASS_MAX_NREGS. */
10483 static unsigned int
10484 s390_hard_regno_nregs (unsigned int regno, machine_mode mode)
10486 return s390_class_max_nregs (REGNO_REG_CLASS (regno), mode);
10489 /* Implement TARGET_HARD_REGNO_MODE_OK.
10491 Integer modes <= word size fit into any GPR.
10492 Integer modes > word size fit into successive GPRs, starting with
10493 an even-numbered register.
10494 SImode and DImode fit into FPRs as well.
10496 Floating point modes <= word size fit into any FPR or GPR.
10497 Floating point modes > word size (i.e. DFmode on 32-bit) fit
10498 into any FPR, or an even-odd GPR pair.
10499 TFmode fits only into an even-odd FPR pair.
10501 Complex floating point modes fit either into two FPRs, or into
10502 successive GPRs (again starting with an even number).
10503 TCmode fits only into two successive even-odd FPR pairs.
10505 Condition code modes fit only into the CC register. */
10507 static bool
10508 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10510 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10511 return false;
10513 switch (REGNO_REG_CLASS (regno))
10515 case VEC_REGS:
10516 return ((GET_MODE_CLASS (mode) == MODE_INT
10517 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10518 || mode == DFmode
10519 || (TARGET_VXE && mode == SFmode)
10520 || s390_vector_mode_supported_p (mode));
10521 break;
10522 case FP_REGS:
10523 if (TARGET_VX
10524 && ((GET_MODE_CLASS (mode) == MODE_INT
10525 && s390_class_max_nregs (FP_REGS, mode) == 1)
10526 || mode == DFmode
10527 || s390_vector_mode_supported_p (mode)))
10528 return true;
10530 if (REGNO_PAIR_OK (regno, mode))
10532 if (mode == SImode || mode == DImode)
10533 return true;
10535 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10536 return true;
10538 break;
10539 case ADDR_REGS:
10540 if (FRAME_REGNO_P (regno) && mode == Pmode)
10541 return true;
10543 /* fallthrough */
10544 case GENERAL_REGS:
10545 if (REGNO_PAIR_OK (regno, mode))
10547 if (TARGET_ZARCH
10548 || (mode != TFmode && mode != TCmode && mode != TDmode))
10549 return true;
10551 break;
10552 case CC_REGS:
10553 if (GET_MODE_CLASS (mode) == MODE_CC)
10554 return true;
10555 break;
10556 case ACCESS_REGS:
10557 if (REGNO_PAIR_OK (regno, mode))
10559 if (mode == SImode || mode == Pmode)
10560 return true;
10562 break;
10563 default:
10564 return false;
10567 return false;
10570 /* Implement TARGET_MODES_TIEABLE_P. */
10572 static bool
10573 s390_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10575 return ((mode1 == SFmode || mode1 == DFmode)
10576 == (mode2 == SFmode || mode2 == DFmode));
10579 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10581 bool
10582 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10584 /* Once we've decided upon a register to use as base register, it must
10585 no longer be used for any other purpose. */
10586 if (cfun->machine->base_reg)
10587 if (REGNO (cfun->machine->base_reg) == old_reg
10588 || REGNO (cfun->machine->base_reg) == new_reg)
10589 return false;
10591 /* Prevent regrename from using call-saved regs which haven't
10592 actually been saved. This is necessary since regrename assumes
10593 the backend save/restore decisions are based on
10594 df_regs_ever_live. Since we have our own routine we have to tell
10595 regrename manually about it. */
10596 if (GENERAL_REGNO_P (new_reg)
10597 && !call_really_used_regs[new_reg]
10598 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10599 return false;
10601 return true;
10604 /* Return nonzero if register REGNO can be used as a scratch register
10605 in peephole2. */
10607 static bool
10608 s390_hard_regno_scratch_ok (unsigned int regno)
10610 /* See s390_hard_regno_rename_ok. */
10611 if (GENERAL_REGNO_P (regno)
10612 && !call_really_used_regs[regno]
10613 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10614 return false;
10616 return true;
10619 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. When generating
10620 code that runs in z/Architecture mode, but conforms to the 31-bit
10621 ABI, GPRs can hold 8 bytes; the ABI guarantees only that the lower 4
10622 bytes are saved across calls, however. */
10624 static bool
10625 s390_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
10627 if (!TARGET_64BIT
10628 && TARGET_ZARCH
10629 && GET_MODE_SIZE (mode) > 4
10630 && ((regno >= 6 && regno <= 15) || regno == 32))
10631 return true;
10633 if (TARGET_VX
10634 && GET_MODE_SIZE (mode) > 8
10635 && (((TARGET_64BIT && regno >= 24 && regno <= 31))
10636 || (!TARGET_64BIT && (regno == 18 || regno == 19))))
10637 return true;
10639 return false;
10642 /* Maximum number of registers to represent a value of mode MODE
10643 in a register of class RCLASS. */
10646 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10648 int reg_size;
10649 bool reg_pair_required_p = false;
10651 switch (rclass)
10653 case FP_REGS:
10654 case VEC_REGS:
10655 reg_size = TARGET_VX ? 16 : 8;
10657 /* TF and TD modes would fit into a VR but we put them into a
10658 register pair since we do not have 128-bit FP instructions on
10659 full VRs. */
10660 if (TARGET_VX
10661 && SCALAR_FLOAT_MODE_P (mode)
10662 && GET_MODE_SIZE (mode) >= 16)
10663 reg_pair_required_p = true;
10665 /* Even if complex types would fit into a single FPR/VR we force
10666 them into a register pair to deal with the parts more easily.
10667 (FIXME: What about complex ints?) */
10668 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10669 reg_pair_required_p = true;
10670 break;
10671 case ACCESS_REGS:
10672 reg_size = 4;
10673 break;
10674 default:
10675 reg_size = UNITS_PER_WORD;
10676 break;
10679 if (reg_pair_required_p)
10680 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10682 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
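/* Examples: TFmode (16 bytes) in FP_REGS needs a register pair either
   way: with the vector facility 2 * ((16/2 + 15) / 16) == 2, without
   it (16 + 7) / 8 == 2.  DImode in GENERAL_REGS needs two registers
   on 31 bit and one on 64 bit.  */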
10685 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10687 static bool
10688 s390_can_change_mode_class (machine_mode from_mode,
10689 machine_mode to_mode,
10690 reg_class_t rclass)
10692 machine_mode small_mode;
10693 machine_mode big_mode;
10695 /* V1TF and TF have different representations in vector
10696 registers. */
10697 if (reg_classes_intersect_p (VEC_REGS, rclass)
10698 && ((from_mode == V1TFmode && to_mode == TFmode)
10699 || (from_mode == TFmode && to_mode == V1TFmode)))
10700 return false;
10702 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10703 return true;
10705 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10707 small_mode = from_mode;
10708 big_mode = to_mode;
10710 else
10712 small_mode = to_mode;
10713 big_mode = from_mode;
10716 /* Values residing in VRs are little-endian style. All modes are
10717 placed left-aligned in a VR. This means that we cannot allow
10718 switching between modes with differing sizes. Also if the vector
10719 facility is available we still place TFmode values in VR register
10720 pairs, since the only instructions we have operating on TFmode
10721 values deal with register pairs. Therefore we have to allow DFmode
10722 subregs of TFmodes to enable the TFmode splitters. */
10723 if (reg_classes_intersect_p (VEC_REGS, rclass)
10724 && (GET_MODE_SIZE (small_mode) < 8
10725 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10726 return false;
10728 /* Likewise for access registers, since they have only half the
10729 word size on 64-bit. */
10730 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10731 return false;
10733 return true;
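/* E.g. a DFmode subreg of a TFmode value is allowed in VEC_REGS (the
   smaller mode is 8 bytes wide and TFmode occupies a register pair),
   whereas an SImode subreg of a DImode value is rejected there since
   the 4 byte mode would be placed left-aligned in the 16 byte
   register.  */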
10736 /* Return true if we use LRA instead of reload pass. */
10737 static bool
10738 s390_lra_p (void)
10740 return s390_lra_flag;
10743 /* Return true if register FROM can be eliminated via register TO. */
10745 static bool
10746 s390_can_eliminate (const int from, const int to)
10748 /* On zSeries machines, we have not marked the base register as fixed.
10749 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10750 If a function requires the base register, we say here that this
10751 elimination cannot be performed. This will cause reload to free
10752 up the base register (as if it were fixed). On the other hand,
10753 if the current function does *not* require the base register, we
10754 say here the elimination succeeds, which in turn allows reload
10755 to allocate the base register for any other purpose. */
10756 if (from == BASE_REGNUM && to == BASE_REGNUM)
10758 if (TARGET_CPU_ZARCH)
10760 s390_init_frame_layout ();
10761 return cfun->machine->base_reg == NULL_RTX;
10764 return false;
10767 /* Everything else must point into the stack frame. */
10768 gcc_assert (to == STACK_POINTER_REGNUM
10769 || to == HARD_FRAME_POINTER_REGNUM);
10771 gcc_assert (from == FRAME_POINTER_REGNUM
10772 || from == ARG_POINTER_REGNUM
10773 || from == RETURN_ADDRESS_POINTER_REGNUM);
10775 /* Make sure we actually saved the return address. */
10776 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10777 if (!crtl->calls_eh_return
10778 && !cfun->stdarg
10779 && !cfun_frame_layout.save_return_addr_p)
10780 return false;
10782 return true;
10785 /* Return the offset between registers FROM and TO initially after the prologue. */
10787 HOST_WIDE_INT
10788 s390_initial_elimination_offset (int from, int to)
10790 HOST_WIDE_INT offset;
10792 /* ??? Why are we called for non-eliminable pairs? */
10793 if (!s390_can_eliminate (from, to))
10794 return 0;
10796 switch (from)
10798 case FRAME_POINTER_REGNUM:
10799 offset = (get_frame_size()
10800 + STACK_POINTER_OFFSET
10801 + crtl->outgoing_args_size);
10802 break;
10804 case ARG_POINTER_REGNUM:
10805 s390_init_frame_layout ();
10806 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10807 break;
10809 case RETURN_ADDRESS_POINTER_REGNUM:
10810 s390_init_frame_layout ();
10812 if (cfun_frame_layout.first_save_gpr_slot == -1)
10814 /* If it turns out that for stdarg nothing went into the reg
10815 save area we also do not need the return address
10816 pointer. */
10817 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10818 return 0;
10820 gcc_unreachable ();
10823 /* In order to make the following work it is not necessary for
10824 r14 to have a save slot. It is sufficient if one other GPR
10825 got one. Since the GPRs are always stored without gaps we
10826 are able to calculate where the r14 save slot would
10827 reside. */
10828 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10829 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10830 UNITS_PER_LONG);
10831 break;
10833 case BASE_REGNUM:
10834 offset = 0;
10835 break;
10837 default:
10838 gcc_unreachable ();
10841 return offset;
10844 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10845 to register BASE. Return generated insn. */
10847 static rtx
10848 save_fpr (rtx base, int offset, int regnum)
10850 rtx addr;
10851 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10853 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10854 set_mem_alias_set (addr, get_varargs_alias_set ());
10855 else
10856 set_mem_alias_set (addr, get_frame_alias_set ());
10858 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10861 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10862 to register BASE. Return generated insn. */
10864 static rtx
10865 restore_fpr (rtx base, int offset, int regnum)
10867 rtx addr;
10868 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10869 set_mem_alias_set (addr, get_frame_alias_set ());
10871 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10874 /* Return true if REGNO is a global register, but not one
10875 of the special ones that need to be saved/restored anyway. */
10877 static inline bool
10878 global_not_special_regno_p (int regno)
10880 return (global_regs[regno]
10881 /* These registers are special and need to be
10882 restored in any case. */
10883 && !(regno == STACK_POINTER_REGNUM
10884 || regno == RETURN_REGNUM
10885 || regno == BASE_REGNUM
10886 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10889 /* Generate insn to save registers FIRST to LAST into
10890 the register save area located at offset OFFSET
10891 relative to register BASE. */
10893 static rtx
10894 save_gprs (rtx base, int offset, int first, int last)
10896 rtx addr, insn, note;
10897 int i;
10899 addr = plus_constant (Pmode, base, offset);
10900 addr = gen_rtx_MEM (Pmode, addr);
10902 set_mem_alias_set (addr, get_frame_alias_set ());
10904 /* Special-case single register. */
10905 if (first == last)
10907 if (TARGET_64BIT)
10908 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10909 else
10910 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10912 if (!global_not_special_regno_p (first))
10913 RTX_FRAME_RELATED_P (insn) = 1;
10914 return insn;
10918 insn = gen_store_multiple (addr,
10919 gen_rtx_REG (Pmode, first),
10920 GEN_INT (last - first + 1));
10922 if (first <= 6 && cfun->stdarg)
10923 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10925 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10927 if (first + i <= 6)
10928 set_mem_alias_set (mem, get_varargs_alias_set ());
10931 /* We need to set the FRAME_RELATED flag on all SETs
10932 inside the store-multiple pattern.
10934 However, we must not emit DWARF records for registers 2..5
10935 if they are stored for use by variable arguments ...
10937 ??? Unfortunately, it is not enough to simply not set the
10938 FRAME_RELATED flags for those SETs, because the first SET
10939 of the PARALLEL is always treated as if it had the flag
10940 set, even if it does not. Therefore we emit a new pattern
10941 without those registers as a REG_FRAME_RELATED_EXPR note. */
10943 if (first >= 6 && !global_not_special_regno_p (first))
10945 rtx pat = PATTERN (insn);
10947 for (i = 0; i < XVECLEN (pat, 0); i++)
10948 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10949 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10950 0, i)))))
10951 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10953 RTX_FRAME_RELATED_P (insn) = 1;
10955 else if (last >= 6)
10957 int start;
10959 for (start = first >= 6 ? first : 6; start <= last; start++)
10960 if (!global_not_special_regno_p (start))
10961 break;
10963 if (start > last)
10964 return insn;
10966 addr = plus_constant (Pmode, base,
10967 offset + (start - first) * UNITS_PER_LONG);
10969 if (start == last)
10971 if (TARGET_64BIT)
10972 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10973 gen_rtx_REG (Pmode, start));
10974 else
10975 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10976 gen_rtx_REG (Pmode, start));
10977 note = PATTERN (note);
10979 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10980 RTX_FRAME_RELATED_P (insn) = 1;
10982 return insn;
10985 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10986 gen_rtx_REG (Pmode, start),
10987 GEN_INT (last - start + 1));
10988 note = PATTERN (note);
10990 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10992 for (i = 0; i < XVECLEN (note, 0); i++)
10993 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10994 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10995 0, i)))))
10996 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10998 RTX_FRAME_RELATED_P (insn) = 1;
11001 return insn;
11004 /* Generate insn to restore registers FIRST to LAST from
11005 the register save area located at offset OFFSET
11006 relative to register BASE. */
11008 static rtx
11009 restore_gprs (rtx base, int offset, int first, int last)
11011 rtx addr, insn;
11013 addr = plus_constant (Pmode, base, offset);
11014 addr = gen_rtx_MEM (Pmode, addr);
11015 set_mem_alias_set (addr, get_frame_alias_set ());
11017 /* Special-case single register. */
11018 if (first == last)
11020 if (TARGET_64BIT)
11021 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
11022 else
11023 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
11025 RTX_FRAME_RELATED_P (insn) = 1;
11026 return insn;
11029 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
11030 addr,
11031 GEN_INT (last - first + 1));
11032 RTX_FRAME_RELATED_P (insn) = 1;
11033 return insn;
11036 /* Return insn sequence to load the GOT register. */
11038 rtx_insn *
11039 s390_load_got (void)
11041 rtx_insn *insns;
11043 /* We cannot use pic_offset_table_rtx here since we use this
11044 function also for non-pic code if __tls_get_offset is called, and
11045 in that case neither PIC_OFFSET_TABLE_REGNUM nor
11046 pic_offset_table_rtx is usable. */
11047 rtx got_rtx = gen_rtx_REG (Pmode, 12);
11049 start_sequence ();
11051 if (TARGET_CPU_ZARCH)
11053 emit_move_insn (got_rtx, s390_got_symbol ());
11055 else
11057 rtx offset;
11059 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, s390_got_symbol ()),
11060 UNSPEC_LTREL_OFFSET);
11061 offset = gen_rtx_CONST (Pmode, offset);
11062 offset = force_const_mem (Pmode, offset);
11064 emit_move_insn (got_rtx, offset);
11066 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
11067 UNSPEC_LTREL_BASE);
11068 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
11070 emit_move_insn (got_rtx, offset);
11073 insns = get_insns ();
11074 end_sequence ();
11075 return insns;
11078 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
11079 and the change to the stack pointer. */
11081 static void
11082 s390_emit_stack_tie (void)
11084 rtx mem = gen_frame_mem (BLKmode,
11085 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
11087 emit_insn (gen_stack_tie (mem));
11090 /* Copy GPRS into FPR save slots. */
11092 static void
11093 s390_save_gprs_to_fprs (void)
11095 int i;
11097 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11098 return;
11100 for (i = 6; i < 16; i++)
11102 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
11104 rtx_insn *insn =
11105 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
11106 gen_rtx_REG (DImode, i));
11107 RTX_FRAME_RELATED_P (insn) = 1;
11108 /* This prevents dwarf2cfi from interpreting the set. Otherwise
11109 it might emit def_cfa_register notes making an FPR the
11110 new CFA. */
11111 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
11116 /* Restore GPRs from FPR save slots. */
11118 static void
11119 s390_restore_gprs_from_fprs (void)
11121 int i;
11123 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11124 return;
11126 for (i = 6; i < 16; i++)
11128 rtx_insn *insn;
11130 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
11131 continue;
11133 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
11135 if (i == STACK_POINTER_REGNUM)
11136 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
11137 else
11138 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
11140 df_set_regs_ever_live (i, true);
11141 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
11142 if (i == STACK_POINTER_REGNUM)
11143 add_reg_note (insn, REG_CFA_DEF_CFA,
11144 plus_constant (Pmode, stack_pointer_rtx,
11145 STACK_POINTER_OFFSET));
11146 RTX_FRAME_RELATED_P (insn) = 1;
11151 /* A pass run immediately before shrink-wrapping and prologue and epilogue
11152 generation. */
11154 namespace {
11156 const pass_data pass_data_s390_early_mach =
11158 RTL_PASS, /* type */
11159 "early_mach", /* name */
11160 OPTGROUP_NONE, /* optinfo_flags */
11161 TV_MACH_DEP, /* tv_id */
11162 0, /* properties_required */
11163 0, /* properties_provided */
11164 0, /* properties_destroyed */
11165 0, /* todo_flags_start */
11166 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
11169 class pass_s390_early_mach : public rtl_opt_pass
11171 public:
11172 pass_s390_early_mach (gcc::context *ctxt)
11173 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
11176 /* opt_pass methods: */
11177 virtual unsigned int execute (function *);
11179 }; // class pass_s390_early_mach
11181 unsigned int
11182 pass_s390_early_mach::execute (function *fun)
11184 rtx_insn *insn;
11186 /* Try to get rid of the FPR clobbers. */
11187 s390_optimize_nonescaping_tx ();
11189 /* Re-compute register info. */
11190 s390_register_info ();
11192 /* If we're using a base register, ensure that it is always valid for
11193 the first non-prologue instruction. */
11194 if (fun->machine->base_reg)
11195 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
11197 /* Annotate all constant pool references to let the scheduler know
11198 they implicitly use the base register. */
11199 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11200 if (INSN_P (insn))
11202 annotate_constant_pool_refs (&PATTERN (insn));
11203 df_insn_rescan (insn);
11205 return 0;
11208 } // anon namespace
11210 /* Calculate TARGET = REG + OFFSET as s390_emit_prologue would do it.
11211 - push too big immediates to the literal pool and annotate the refs
11212 - emit frame related notes for stack pointer changes. */
11214 static rtx
11215 s390_prologue_plus_offset (rtx target, rtx reg, rtx offset, bool frame_related_p)
11217 rtx insn;
11218 rtx orig_offset = offset;
11220 gcc_assert (REG_P (target));
11221 gcc_assert (REG_P (reg));
11222 gcc_assert (CONST_INT_P (offset));
11224 if (offset == const0_rtx) /* lr/lgr */
11226 insn = emit_move_insn (target, reg);
11228 else if (DISP_IN_RANGE (INTVAL (offset))) /* la */
11230 insn = emit_move_insn (target, gen_rtx_PLUS (Pmode, reg,
11231 offset));
11233 else
11235 if (!satisfies_constraint_K (offset) /* ahi/aghi */
11236 && (!TARGET_EXTIMM
11237 || (!satisfies_constraint_Op (offset) /* alfi/algfi */
11238 && !satisfies_constraint_On (offset)))) /* slfi/slgfi */
11239 offset = force_const_mem (Pmode, offset);
11241 if (target != reg)
11243 insn = emit_move_insn (target, reg);
11244 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11247 insn = emit_insn (gen_add2_insn (target, offset));
11249 if (!CONST_INT_P (offset))
11251 annotate_constant_pool_refs (&PATTERN (insn));
11253 if (frame_related_p)
11254 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11255 gen_rtx_SET (target,
11256 gen_rtx_PLUS (Pmode, target,
11257 orig_offset)));
11261 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11263 /* If this is a stack adjustment and we are generating a stack clash
11264 prologue, then add a REG_STACK_CHECK note to signal that this insn
11265 should be left alone. */
11266 if (flag_stack_clash_protection && target == stack_pointer_rtx)
11267 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
11269 return insn;
11272 /* Emit a compare instruction with a volatile memory access as stack
11273 probe. It does not waste store tags and does not clobber any
11274 registers apart from the condition code. */
11275 static void
11276 s390_emit_stack_probe (rtx addr)
11278 rtx tmp = gen_rtx_MEM (Pmode, addr);
11279 MEM_VOLATILE_P (tmp) = 1;
11280 s390_emit_compare (EQ, gen_rtx_REG (Pmode, 0), tmp);
11281 emit_insn (gen_blockage ());
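/* The scheduling blockage keeps the probe ordered with respect to the
   surrounding stack pointer adjustments.  */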
11284 /* Use a runtime loop if we have to emit more probes than this. */
11285 #define MIN_UNROLL_PROBES 3
11287 /* Allocate SIZE bytes of stack space, using TEMP_REG as a temporary
11288 if necessary. LAST_PROBE_OFFSET contains the offset of the closest
11289 probe relative to the stack pointer.
11291 Note that SIZE is negative.
11293 The return value is true if TEMP_REG has been clobbered. */
11294 static bool
11295 allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
11296 rtx temp_reg)
11298 bool temp_reg_clobbered_p = false;
11299 HOST_WIDE_INT probe_interval
11300 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11301 HOST_WIDE_INT guard_size
11302 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
11304 if (flag_stack_clash_protection)
11306 if (last_probe_offset + -INTVAL (size) < guard_size)
11307 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
11308 else
11310 rtx offset = GEN_INT (probe_interval - UNITS_PER_LONG);
11311 HOST_WIDE_INT rounded_size = -INTVAL (size) & -probe_interval;
11312 HOST_WIDE_INT num_probes = rounded_size / probe_interval;
11313 HOST_WIDE_INT residual = -INTVAL (size) - rounded_size;
11315 if (num_probes < MIN_UNROLL_PROBES)
11317 /* Emit unrolled probe statements. */
11319 for (unsigned int i = 0; i < num_probes; i++)
11321 s390_prologue_plus_offset (stack_pointer_rtx,
11322 stack_pointer_rtx,
11323 GEN_INT (-probe_interval), true);
11324 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11325 stack_pointer_rtx,
11326 offset));
11328 dump_stack_clash_frame_info (PROBE_INLINE, residual != 0);
11330 else
11332 /* Emit a loop probing the pages. */
11334 rtx_code_label *loop_start_label = gen_label_rtx ();
11336 /* From now on temp_reg will be the CFA register. */
11337 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11338 GEN_INT (-rounded_size), true);
11339 emit_label (loop_start_label);
11341 s390_prologue_plus_offset (stack_pointer_rtx,
11342 stack_pointer_rtx,
11343 GEN_INT (-probe_interval), false);
11344 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11345 stack_pointer_rtx,
11346 offset));
11347 emit_cmp_and_jump_insns (stack_pointer_rtx, temp_reg,
11348 GT, NULL_RTX,
11349 Pmode, 1, loop_start_label);
11351 /* Without this, make_edges ICEs. */
11352 JUMP_LABEL (get_last_insn ()) = loop_start_label;
11353 LABEL_NUSES (loop_start_label) = 1;
11355 /* That's going to be a NOP since stack pointer and
11356 temp_reg are supposed to be the same here. We just
11357 emit it to set the CFA reg back to r15. */
11358 s390_prologue_plus_offset (stack_pointer_rtx, temp_reg,
11359 const0_rtx, true);
11360 temp_reg_clobbered_p = true;
11361 dump_stack_clash_frame_info (PROBE_LOOP, residual != 0);
11364 /* Handle any residual allocation request. */
11365 s390_prologue_plus_offset (stack_pointer_rtx,
11366 stack_pointer_rtx,
11367 GEN_INT (-residual), true);
11368 last_probe_offset += residual;
11369 if (last_probe_offset >= probe_interval)
11370 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11371 stack_pointer_rtx,
11372 GEN_INT (residual
11373 - UNITS_PER_LONG)));
11375 return temp_reg_clobbered_p;
11379 /* Subtract frame size from stack pointer. */
11380 s390_prologue_plus_offset (stack_pointer_rtx,
11381 stack_pointer_rtx,
11382 size, true);
11384 return temp_reg_clobbered_p;
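/* To recap the stack-clash path above: the allocation is done in
   probe_interval sized chunks.  Fewer than MIN_UNROLL_PROBES chunks
   are emitted as unrolled decrement-and-probe pairs; otherwise a loop
   decrements the stack pointer chunk by chunk and probes until it
   reaches the end value kept in temp_reg.  The residual below one
   probe_interval is allocated last and is probed only if the distance
   to the closest earlier probe would otherwise reach probe_interval.  */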
11387 /* Expand the prologue into a bunch of separate insns. */
11389 void
11390 s390_emit_prologue (void)
11392 rtx insn, addr;
11393 rtx temp_reg;
11394 int i;
11395 int offset;
11396 int next_fpr = 0;
11398 /* Choose the best register for temporary use within the prologue.
11399 TPF with profiling must avoid register 14 - the tracing function
11400 needs the original contents of r14 to be preserved. */
11402 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
11403 && !crtl->is_leaf
11404 && !TARGET_TPF_PROFILING)
11405 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11406 else if (flag_split_stack && cfun->stdarg)
11407 temp_reg = gen_rtx_REG (Pmode, 12);
11408 else
11409 temp_reg = gen_rtx_REG (Pmode, 1);
11411 /* When probing for stack-clash mitigation, we have to track the distance
11412 between the stack pointer and closest known reference.
11414 Most of the time we have to make a worst case assumption. The
11415 only exception is when TARGET_BACKCHAIN is active, in which case
11416 we know *sp (offset 0) was written. */
11417 HOST_WIDE_INT probe_interval
11418 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11419 HOST_WIDE_INT last_probe_offset
11420 = (TARGET_BACKCHAIN
11421 ? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
11422 : probe_interval - (STACK_BOUNDARY / UNITS_PER_WORD));
11424 s390_save_gprs_to_fprs ();
11426 /* Save call saved gprs. */
11427 if (cfun_frame_layout.first_save_gpr != -1)
11429 insn = save_gprs (stack_pointer_rtx,
11430 cfun_frame_layout.gprs_offset +
11431 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11432 - cfun_frame_layout.first_save_gpr_slot),
11433 cfun_frame_layout.first_save_gpr,
11434 cfun_frame_layout.last_save_gpr);
11436 /* This is not 100% correct. If we have more than one register saved,
11437 then LAST_PROBE_OFFSET can move even closer to sp. */
11438 last_probe_offset
11439 = (cfun_frame_layout.gprs_offset +
11440 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11441 - cfun_frame_layout.first_save_gpr_slot));
11443 emit_insn (insn);
11446 /* Dummy insn to mark literal pool slot. */
11448 if (cfun->machine->base_reg)
11449 emit_insn (gen_main_pool (cfun->machine->base_reg));
11451 offset = cfun_frame_layout.f0_offset;
11453 /* Save f0 and f2. */
11454 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
11456 if (cfun_fpr_save_p (i))
11458 save_fpr (stack_pointer_rtx, offset, i);
11459 if (offset < last_probe_offset)
11460 last_probe_offset = offset;
11461 offset += 8;
11463 else if (!TARGET_PACKED_STACK || cfun->stdarg)
11464 offset += 8;
11467 /* Save f4 and f6. */
11468 offset = cfun_frame_layout.f4_offset;
11469 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11471 if (cfun_fpr_save_p (i))
11473 insn = save_fpr (stack_pointer_rtx, offset, i);
11474 if (offset < last_probe_offset)
11475 last_probe_offset = offset;
11476 offset += 8;
11478 /* If f4 and f6 are call clobbered they are saved due to
11479 stdarg and therefore are not frame related. */
11480 if (!call_really_used_regs[i])
11481 RTX_FRAME_RELATED_P (insn) = 1;
11483 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
11484 offset += 8;
11487 if (TARGET_PACKED_STACK
11488 && cfun_save_high_fprs_p
11489 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
11491 offset = (cfun_frame_layout.f8_offset
11492 + (cfun_frame_layout.high_fprs - 1) * 8);
11494 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
11495 if (cfun_fpr_save_p (i))
11497 insn = save_fpr (stack_pointer_rtx, offset, i);
11498 if (offset < last_probe_offset)
11499 last_probe_offset = offset;
11501 RTX_FRAME_RELATED_P (insn) = 1;
11502 offset -= 8;
11504 if (offset >= cfun_frame_layout.f8_offset)
11505 next_fpr = i;
11508 if (!TARGET_PACKED_STACK)
11509 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
11511 if (flag_stack_usage_info)
11512 current_function_static_stack_size = cfun_frame_layout.frame_size;
11514 /* Decrement stack pointer. */
11516 if (cfun_frame_layout.frame_size > 0)
11518 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11519 rtx_insn *stack_pointer_backup_loc;
11520 bool temp_reg_clobbered_p;
11522 if (s390_stack_size)
11524 HOST_WIDE_INT stack_guard;
11526 if (s390_stack_guard)
11527 stack_guard = s390_stack_guard;
11528 else
11530 /* If no value for stack guard is provided the smallest power of 2
11531 larger than the current frame size is chosen. */
11532 stack_guard = 1;
11533 while (stack_guard < cfun_frame_layout.frame_size)
11534 stack_guard <<= 1;
11537 if (cfun_frame_layout.frame_size >= s390_stack_size)
11539 warning (0, "frame size of function %qs is %wd"
11540 " bytes exceeding user provided stack limit of "
11541 "%d bytes. "
11542 "An unconditional trap is added.",
11543 current_function_name(), cfun_frame_layout.frame_size,
11544 s390_stack_size);
11545 emit_insn (gen_trap ());
11546 emit_barrier ();
11548 else
11550 /* stack_guard has to be smaller than s390_stack_size.
11551 Otherwise we would emit an AND with zero which would
11552 not match the test under mask pattern. */
11553 if (stack_guard >= s390_stack_size)
11555 warning (0, "frame size of function %qs is %wd"
11556 " bytes which is more than half the stack size. "
11557 "The dynamic check would not be reliable. "
11558 "No check emitted for this function.",
11559 current_function_name(),
11560 cfun_frame_layout.frame_size);
11562 else
11564 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
11565 & ~(stack_guard - 1));
11567 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
11568 GEN_INT (stack_check_mask));
11569 if (TARGET_64BIT)
11570 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
11571 t, const0_rtx),
11572 t, const0_rtx, const0_rtx));
11573 else
11574 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
11575 t, const0_rtx),
11576 t, const0_rtx, const0_rtx));
11581 if (s390_warn_framesize > 0
11582 && cfun_frame_layout.frame_size >= s390_warn_framesize)
11583 warning (0, "frame size of %qs is %wd bytes",
11584 current_function_name (), cfun_frame_layout.frame_size);
11586 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
11587 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
11589 /* Save the location where we could back up the incoming stack
11590 pointer. */
11591 stack_pointer_backup_loc = get_last_insn ();
11593 temp_reg_clobbered_p = allocate_stack_space (frame_off, last_probe_offset,
11594 temp_reg);
11596 if (TARGET_BACKCHAIN || next_fpr)
11598 if (temp_reg_clobbered_p)
11600 /* allocate_stack_space had to make use of temp_reg and
11601 we need it to hold a backup of the incoming stack
11602 pointer. Calculate that value back from the current
11603 stack pointer. */
11604 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11605 GEN_INT (cfun_frame_layout.frame_size),
11606 false);
11608 else
11610 /* allocate_stack_space didn't actually require
11611 temp_reg. Insert the stack pointer backup insn
11612 before the stack pointer decrement code - knowing now
11613 that the value will survive. */
11614 emit_insn_after (gen_move_insn (temp_reg, stack_pointer_rtx),
11615 stack_pointer_backup_loc);
11619 /* Set backchain. */
11621 if (TARGET_BACKCHAIN)
11623 if (cfun_frame_layout.backchain_offset)
11624 addr = gen_rtx_MEM (Pmode,
11625 plus_constant (Pmode, stack_pointer_rtx,
11626 cfun_frame_layout.backchain_offset));
11627 else
11628 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
11629 set_mem_alias_set (addr, get_frame_alias_set ());
11630 insn = emit_insn (gen_move_insn (addr, temp_reg));
11633 /* If we support non-call exceptions (e.g. for Java),
11634 we need to make sure the backchain pointer is set up
11635 before any possibly trapping memory access. */
11636 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
11638 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
11639 emit_clobber (addr);
11642 else if (flag_stack_clash_protection)
11643 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
11645 /* Save fprs 8 - 15 (64 bit ABI). */
11647 if (cfun_save_high_fprs_p && next_fpr)
11649 /* If the stack might be accessed through a different register
11650 we have to make sure that the stack pointer decrement is not
11651 moved below the use of the stack slots. */
11652 s390_emit_stack_tie ();
11654 insn = emit_insn (gen_add2_insn (temp_reg,
11655 GEN_INT (cfun_frame_layout.f8_offset)));
11657 offset = 0;
11659 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11660 if (cfun_fpr_save_p (i))
11662 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11663 cfun_frame_layout.frame_size
11664 + cfun_frame_layout.f8_offset
11665 + offset);
11667 insn = save_fpr (temp_reg, offset, i);
11668 offset += 8;
11669 RTX_FRAME_RELATED_P (insn) = 1;
11670 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11671 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11672 gen_rtx_REG (DFmode, i)));
11676 /* Set frame pointer, if needed. */
11678 if (frame_pointer_needed)
11680 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11681 RTX_FRAME_RELATED_P (insn) = 1;
11684 /* Set up got pointer, if needed. */
11686 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11688 rtx_insn *insns = s390_load_got ();
11690 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11691 annotate_constant_pool_refs (&PATTERN (insn));
11693 emit_insn (insns);
11696 if (TARGET_TPF_PROFILING)
11698 /* Generate a BAS instruction to serve as a function
11699 entry intercept to facilitate the use of tracing
11700 algorithms located at the branch target. */
11701 emit_insn (gen_prologue_tpf ());
11703 /* Emit a blockage here so that all code
11704 lies between the profiling mechanisms. */
11705 emit_insn (gen_blockage ());
11709 /* Expand the epilogue into a bunch of separate insns. */
11711 void
11712 s390_emit_epilogue (bool sibcall)
11714 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11715 int area_bottom, area_top, offset = 0;
11716 int next_offset;
11717 int i;
11719 if (TARGET_TPF_PROFILING)
11722 /* Generate a BAS instruction to serve as a function
11723 entry intercept to facilitate the use of tracing
11724 algorithms located at the branch target. */
11726 /* Emit a blockage here so that all code
11727 lies between the profiling mechanisms. */
11728 emit_insn (gen_blockage ());
11730 emit_insn (gen_epilogue_tpf ());
11733 /* Check whether to use frame or stack pointer for restore. */
11735 frame_pointer = (frame_pointer_needed
11736 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11738 s390_frame_area (&area_bottom, &area_top);
11740 /* Check whether we can access the register save area.
11741 If not, increment the frame pointer as required. */
11743 if (area_top <= area_bottom)
11745 /* Nothing to restore. */
11747 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11748 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11750 /* Area is in range. */
11751 offset = cfun_frame_layout.frame_size;
11753 else
11755 rtx insn, frame_off, cfa;
11757 offset = area_bottom < 0 ? -area_bottom : 0;
11758 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11760 cfa = gen_rtx_SET (frame_pointer,
11761 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11762 if (DISP_IN_RANGE (INTVAL (frame_off)))
11764 insn = gen_rtx_SET (frame_pointer,
11765 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11766 insn = emit_insn (insn);
11768 else
11770 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11771 frame_off = force_const_mem (Pmode, frame_off);
11773 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11774 annotate_constant_pool_refs (&PATTERN (insn));
11776 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11777 RTX_FRAME_RELATED_P (insn) = 1;
11780 /* Restore call saved fprs. */
11782 if (TARGET_64BIT)
11784 if (cfun_save_high_fprs_p)
11786 next_offset = cfun_frame_layout.f8_offset;
11787 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11789 if (cfun_fpr_save_p (i))
11791 restore_fpr (frame_pointer,
11792 offset + next_offset, i);
11793 cfa_restores
11794 = alloc_reg_note (REG_CFA_RESTORE,
11795 gen_rtx_REG (DFmode, i), cfa_restores);
11796 next_offset += 8;
11802 else
11804 next_offset = cfun_frame_layout.f4_offset;
11805 /* f4, f6 */
11806 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11808 if (cfun_fpr_save_p (i))
11810 restore_fpr (frame_pointer,
11811 offset + next_offset, i);
11812 cfa_restores
11813 = alloc_reg_note (REG_CFA_RESTORE,
11814 gen_rtx_REG (DFmode, i), cfa_restores);
11815 next_offset += 8;
11817 else if (!TARGET_PACKED_STACK)
11818 next_offset += 8;
11823 /* Return register. */
11825 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11827 /* Restore call saved gprs. */
11829 if (cfun_frame_layout.first_restore_gpr != -1)
11831 rtx insn, addr;
11832 int i;
11834 /* Check for global registers and store them to the stack
11835 location from which the restore below will reload them. */
11837 for (i = cfun_frame_layout.first_restore_gpr;
11838 i <= cfun_frame_layout.last_restore_gpr;
11839 i++)
11841 if (global_not_special_regno_p (i))
11843 addr = plus_constant (Pmode, frame_pointer,
11844 offset + cfun_frame_layout.gprs_offset
11845 + (i - cfun_frame_layout.first_save_gpr_slot)
11846 * UNITS_PER_LONG);
11847 addr = gen_rtx_MEM (Pmode, addr);
11848 set_mem_alias_set (addr, get_frame_alias_set ());
11849 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11851 else
11852 cfa_restores
11853 = alloc_reg_note (REG_CFA_RESTORE,
11854 gen_rtx_REG (Pmode, i), cfa_restores);
11857 /* Fetch the return address from the stack before the load multiple;
11858 this is beneficial for scheduling.
11860 Only do this if we already decided that r14 needs to be
11861 saved to a stack slot. (And not just because r14 happens to
11862 be in between two GPRs which need saving.) Otherwise it
11863 would be difficult to take that decision back in
11864 s390_optimize_prologue.
11866 This optimization is only helpful on in-order machines. */
11867 if (! sibcall
11868 && cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK
11869 && s390_tune <= PROCESSOR_2097_Z10)
11871 int return_regnum = find_unused_clobbered_reg();
11872 if (!return_regnum
11873 || (TARGET_INDIRECT_BRANCH_NOBP_RET_OPTION
11874 && !TARGET_CPU_Z10
11875 && return_regnum == INDIRECT_BRANCH_THUNK_REGNUM))
11877 gcc_assert (INDIRECT_BRANCH_THUNK_REGNUM != 4);
11878 return_regnum = 4;
11880 return_reg = gen_rtx_REG (Pmode, return_regnum);
11882 addr = plus_constant (Pmode, frame_pointer,
11883 offset + cfun_frame_layout.gprs_offset
11884 + (RETURN_REGNUM
11885 - cfun_frame_layout.first_save_gpr_slot)
11886 * UNITS_PER_LONG);
11887 addr = gen_rtx_MEM (Pmode, addr);
11888 set_mem_alias_set (addr, get_frame_alias_set ());
11889 emit_move_insn (return_reg, addr);
11891 /* Once we have done that optimization we have to make sure
11892 s390_optimize_prologue does not try to remove the store
11893 of r14 since we will not be able to find the load issued
11894 here. */
11895 cfun_frame_layout.save_return_addr_p = true;
11898 insn = restore_gprs (frame_pointer,
11899 offset + cfun_frame_layout.gprs_offset
11900 + (cfun_frame_layout.first_restore_gpr
11901 - cfun_frame_layout.first_save_gpr_slot)
11902 * UNITS_PER_LONG,
11903 cfun_frame_layout.first_restore_gpr,
11904 cfun_frame_layout.last_restore_gpr);
11905 insn = emit_insn (insn);
11906 REG_NOTES (insn) = cfa_restores;
11907 add_reg_note (insn, REG_CFA_DEF_CFA,
11908 plus_constant (Pmode, stack_pointer_rtx,
11909 STACK_POINTER_OFFSET));
11910 RTX_FRAME_RELATED_P (insn) = 1;
11913 s390_restore_gprs_from_fprs ();
11915 if (! sibcall)
11916 emit_jump_insn (gen_return_use (return_reg));
11919 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11921 static void
11922 s300_set_up_by_prologue (hard_reg_set_container *regs)
11924 if (cfun->machine->base_reg
11925 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11926 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11929 /* -fsplit-stack support. */
11931 /* A SYMBOL_REF for __morestack. */
11932 static GTY(()) rtx morestack_ref;
11934 /* When using -fsplit-stack, the allocation routines set a field in
11935 the TCB to the bottom of the stack plus this much space, measured
11936 in bytes. */
11938 #define SPLIT_STACK_AVAILABLE 1024
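/* Illustrative sketch of the check emitted below (an explanatory
   comment only, not compiler code); tp and sp stand for the thread and
   stack pointer:

     char *guard = *(char **) (tp + psso);   // __private_ss in the TCB
     if ((char *) sp < guard + frame_size)   // not enough space left
       call __morestack;                     // parameters via the block at %r1

   For frames of at most SPLIT_STACK_AVAILABLE bytes the addition is
   skipped and sp is compared against the guard directly.  */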
11940 /* Emit -fsplit-stack prologue, which goes before the regular function
11941 prologue. */
11943 void
11944 s390_expand_split_stack_prologue (void)
11946 rtx r1, guard, cc = NULL;
11947 rtx_insn *insn;
11948 /* Offset from thread pointer to __private_ss. */
11949 int psso = TARGET_64BIT ? 0x38 : 0x20;
11951 /* Frame size and argument size - the two parameters to __morestack. */
11952 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11953 /* Align argument size to 8 bytes - simplifies __morestack code. */
11954 HOST_WIDE_INT args_size = crtl->args.size >= 0
11955 ? ((crtl->args.size + 7) & ~7)
11956 : 0;
11957 /* Label to be called by __morestack. */
11958 rtx_code_label *call_done = NULL;
11959 rtx_code_label *parm_base = NULL;
11960 rtx tmp;
11962 gcc_assert (flag_split_stack && reload_completed);
11963 if (!TARGET_CPU_ZARCH)
11965 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11966 return;
11969 r1 = gen_rtx_REG (Pmode, 1);
11971 /* If no stack frame will be allocated, don't do anything. */
11972 if (!frame_size)
11974 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11976 /* If va_start is used, just use r15. */
11977 emit_move_insn (r1,
11978 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11979 GEN_INT (STACK_POINTER_OFFSET)));
11982 return;
11985 if (morestack_ref == NULL_RTX)
11987 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11988 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11989 | SYMBOL_FLAG_FUNCTION);
11992 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11994 /* If frame_size will fit in an add instruction, do a stack space
11995 check, and only call __morestack if there's not enough space. */
11997 /* Get thread pointer. r1 is the only register we can always destroy - r0
11998 could contain a static chain (and cannot be used to address memory
11999 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
12000 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
12001 /* Aim at __private_ss. */
12002 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
12004 /* If less than 1kiB is used, skip the addition and compare directly with
12005 __private_ss. */
12006 if (frame_size > SPLIT_STACK_AVAILABLE)
12008 emit_move_insn (r1, guard);
12009 if (TARGET_64BIT)
12010 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
12011 else
12012 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
12013 guard = r1;
12016 /* Compare the (maybe adjusted) guard with the stack pointer. */
12017 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
12020 call_done = gen_label_rtx ();
12021 parm_base = gen_label_rtx ();
12023 /* Emit the parameter block. */
12024 tmp = gen_split_stack_data (parm_base, call_done,
12025 GEN_INT (frame_size),
12026 GEN_INT (args_size));
12027 insn = emit_insn (tmp);
12028 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
12029 LABEL_NUSES (call_done)++;
12030 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12031 LABEL_NUSES (parm_base)++;
12033 /* %r1 = litbase. */
12034 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
12035 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12036 LABEL_NUSES (parm_base)++;
12038 /* Now, we need to call __morestack. It has very special calling
12039 conventions: it preserves the param/return/static chain registers for
12040 calling the main function body, and looks for its own parameters at %r1. */
12042 if (cc != NULL)
12044 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
12046 insn = emit_jump_insn (tmp);
12047 JUMP_LABEL (insn) = call_done;
12048 LABEL_NUSES (call_done)++;
12050 /* Mark the jump as very unlikely to be taken. */
12051 add_reg_br_prob_note (insn,
12052 profile_probability::very_unlikely ());
12054 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12056 /* If va_start is used, and __morestack was not called, just use
12057 r15. */
12058 emit_move_insn (r1,
12059 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
12060 GEN_INT (STACK_POINTER_OFFSET)));
12063 else
12065 tmp = gen_split_stack_call (morestack_ref, call_done);
12066 insn = emit_jump_insn (tmp);
12067 JUMP_LABEL (insn) = call_done;
12068 LABEL_NUSES (call_done)++;
12069 emit_barrier ();
12072 /* __morestack will call us here. */
12074 emit_label (call_done);
12077 /* We may have to tell the dataflow pass that the split stack prologue
12078 is initializing a register. */
12080 static void
12081 s390_live_on_entry (bitmap regs)
12083 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12085 gcc_assert (flag_split_stack);
12086 bitmap_set_bit (regs, 1);
12090 /* Return true if the function can use simple_return to return outside
12091 of a shrink-wrapped region. At present shrink-wrapping is supported
12092 in all cases. */
12094 bool
12095 s390_can_use_simple_return_insn (void)
12097 return true;
12100 /* Return true if the epilogue is guaranteed to contain only a return
12101 instruction and if a direct return can therefore be used instead.
12102 One of the main advantages of using direct return instructions
12103 is that we can then use conditional returns. */
12105 bool
12106 s390_can_use_return_insn (void)
12108 int i;
12110 if (!reload_completed)
12111 return false;
12113 if (crtl->profile)
12114 return false;
12116 if (TARGET_TPF_PROFILING)
12117 return false;
12119 for (i = 0; i < 16; i++)
12120 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
12121 return false;
12123 /* For 31 bit this is not covered by the frame_size check below
12124 since f4, f6 are saved in the register save area without needing
12125 additional stack space. */
12126 if (!TARGET_64BIT
12127 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
12128 return false;
12130 if (cfun->machine->base_reg
12131 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
12132 return false;
12134 return cfun_frame_layout.frame_size == 0;
12137 /* The VX ABI differs for vararg functions. Therefore we need the
12138 prototype of the callee to be available when passing vector type
12139 values. */
12140 static const char *
12141 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
12143 return ((TARGET_VX_ABI
12144 && typelist == 0
12145 && VECTOR_TYPE_P (TREE_TYPE (val))
12146 && (funcdecl == NULL_TREE
12147 || (TREE_CODE (funcdecl) == FUNCTION_DECL
12148 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
12149 ? N_("vector argument passed to unprototyped function")
12150 : NULL);
12154 /* Return the size in bytes of a function argument of
12155 type TYPE and/or mode MODE. At least one of TYPE or
12156 MODE must be specified. */
12158 static int
12159 s390_function_arg_size (machine_mode mode, const_tree type)
12161 if (type)
12162 return int_size_in_bytes (type);
12164 /* No type info available for some library calls ... */
12165 if (mode != BLKmode)
12166 return GET_MODE_SIZE (mode);
12168 /* If we have neither type nor mode, abort */
12169 gcc_unreachable ();
12172 /* Return true if a function argument of type TYPE and mode MODE
12173 is to be passed in a vector register, if available. */
12175 bool
12176 s390_function_arg_vector (machine_mode mode, const_tree type)
12178 if (!TARGET_VX_ABI)
12179 return false;
12181 if (s390_function_arg_size (mode, type) > 16)
12182 return false;
12184 /* No type info available for some library calls ... */
12185 if (!type)
12186 return VECTOR_MODE_P (mode);
12188 /* The ABI says that record types with a single member are treated
12189 just like that member would be. */
12190 while (TREE_CODE (type) == RECORD_TYPE)
12192 tree field, single = NULL_TREE;
12194 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
12196 if (TREE_CODE (field) != FIELD_DECL)
12197 continue;
12199 if (single == NULL_TREE)
12200 single = TREE_TYPE (field);
12201 else
12202 return false;
12205 if (single == NULL_TREE)
12206 return false;
12207 else
12209 /* If the field declaration adds extra bytes due to
12210 e.g. padding, this is not accepted as a vector type. */
12211 if (int_size_in_bytes (single) <= 0
12212 || int_size_in_bytes (single) != int_size_in_bytes (type))
12213 return false;
12214 type = single;
12218 return VECTOR_TYPE_P (type);
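/* Example (illustrative only, not compiler code): under the VX ABI

     typedef int v4si __attribute__ ((vector_size (16)));
     struct s { v4si x; };

   an argument of type struct s is treated exactly like a plain v4si and
   is eligible for a vector register, whereas a record with two members,
   or one whose single member does not cover the whole record, is not.  */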
12221 /* Return true if a function argument of type TYPE and mode MODE
12222 is to be passed in a floating-point register, if available. */
12224 static bool
12225 s390_function_arg_float (machine_mode mode, const_tree type)
12227 if (s390_function_arg_size (mode, type) > 8)
12228 return false;
12230 /* Soft-float changes the ABI: no floating-point registers are used. */
12231 if (TARGET_SOFT_FLOAT)
12232 return false;
12234 /* No type info available for some library calls ... */
12235 if (!type)
12236 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
12238 /* The ABI says that record types with a single member are treated
12239 just like that member would be. */
12240 while (TREE_CODE (type) == RECORD_TYPE)
12242 tree field, single = NULL_TREE;
12244 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
12246 if (TREE_CODE (field) != FIELD_DECL)
12247 continue;
12249 if (single == NULL_TREE)
12250 single = TREE_TYPE (field);
12251 else
12252 return false;
12255 if (single == NULL_TREE)
12256 return false;
12257 else
12258 type = single;
12261 return TREE_CODE (type) == REAL_TYPE;
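/* Example (illustrative only, not compiler code):

     struct w { double d; };

   is passed like a plain double in a floating-point register (when
   hardware float is enabled), while a record with a second member falls
   through to the integer or by-reference rules instead.  */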
12264 /* Return true if a function argument of type TYPE and mode MODE
12265 is to be passed in an integer register, or a pair of integer
12266 registers, if available. */
12268 static bool
12269 s390_function_arg_integer (machine_mode mode, const_tree type)
12271 int size = s390_function_arg_size (mode, type);
12272 if (size > 8)
12273 return false;
12275 /* No type info available for some library calls ... */
12276 if (!type)
12277 return GET_MODE_CLASS (mode) == MODE_INT
12278 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
12280 /* We accept small integral (and similar) types. */
12281 if (INTEGRAL_TYPE_P (type)
12282 || POINTER_TYPE_P (type)
12283 || TREE_CODE (type) == NULLPTR_TYPE
12284 || TREE_CODE (type) == OFFSET_TYPE
12285 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
12286 return true;
12288 /* We also accept structs of size 1, 2, 4, 8 that are not
12289 passed in floating-point registers. */
12290 if (AGGREGATE_TYPE_P (type)
12291 && exact_log2 (size) >= 0
12292 && !s390_function_arg_float (mode, type))
12293 return true;
12295 return false;
12298 /* Return 1 if a function argument of type TYPE and mode MODE
12299 is to be passed by reference. The ABI specifies that only
12300 structures of size 1, 2, 4, or 8 bytes are passed by value,
12301 all other structures (and complex numbers) are passed by
12302 reference. */
12304 static bool
12305 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
12306 machine_mode mode, const_tree type,
12307 bool named ATTRIBUTE_UNUSED)
12309 int size = s390_function_arg_size (mode, type);
12311 if (s390_function_arg_vector (mode, type))
12312 return false;
12314 if (size > 8)
12315 return true;
12317 if (type)
12319 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
12320 return true;
12322 if (TREE_CODE (type) == COMPLEX_TYPE
12323 || TREE_CODE (type) == VECTOR_TYPE)
12324 return true;
12327 return false;
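/* Example (illustrative only, not compiler code):

     struct p { short a; short b; };   // 4 bytes  -> passed by value
     struct q { char c[3]; };          // 3 bytes  -> passed by reference
     _Complex double z;                // complex  -> passed by reference

   Only aggregates whose size is a power of two no larger than 8 bytes
   are passed by value; everything else is passed via a pointer to a
   copy made by the caller.  */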
12330 /* Update the data in CUM to advance over an argument of mode MODE and
12331 data type TYPE. (TYPE is null for libcalls where that information
12332 may not be available.) The boolean NAMED specifies whether the
12333 argument is a named argument (as opposed to an unnamed argument
12334 matching an ellipsis). */
12336 static void
12337 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
12338 const_tree type, bool named)
12340 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12342 if (s390_function_arg_vector (mode, type))
12344 /* We are called for unnamed vector stdarg arguments which are
12345 passed on the stack. In this case this hook does not have to
12346 do anything since stack arguments are tracked by common
12347 code. */
12348 if (!named)
12349 return;
12350 cum->vrs += 1;
12352 else if (s390_function_arg_float (mode, type))
12354 cum->fprs += 1;
12356 else if (s390_function_arg_integer (mode, type))
12358 int size = s390_function_arg_size (mode, type);
12359 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
12361 else
12362 gcc_unreachable ();
12365 /* Define where to put the arguments to a function.
12366 Value is zero to push the argument on the stack,
12367 or a hard register in which to store the argument.
12369 MODE is the argument's machine mode.
12370 TYPE is the data type of the argument (as a tree).
12371 This is null for libcalls where that information may
12372 not be available.
12373 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12374 the preceding args and about the function being called.
12375 NAMED is nonzero if this argument is a named parameter
12376 (otherwise it is an extra parameter matching an ellipsis).
12378 On S/390, we use general purpose registers 2 through 6 to
12379 pass integer, pointer, and certain structure arguments, and
12380 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
12381 to pass floating point arguments. All remaining arguments
12382 are pushed to the stack. */
12384 static rtx
12385 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
12386 const_tree type, bool named)
12388 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12390 if (!named)
12391 s390_check_type_for_vector_abi (type, true, false);
12393 if (s390_function_arg_vector (mode, type))
12395 /* Vector arguments being part of the ellipsis are passed on the
12396 stack. */
12397 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
12398 return NULL_RTX;
12400 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
12402 else if (s390_function_arg_float (mode, type))
12404 if (cum->fprs + 1 > FP_ARG_NUM_REG)
12405 return NULL_RTX;
12406 else
12407 return gen_rtx_REG (mode, cum->fprs + 16);
12409 else if (s390_function_arg_integer (mode, type))
12411 int size = s390_function_arg_size (mode, type);
12412 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12414 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
12415 return NULL_RTX;
12416 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
12417 return gen_rtx_REG (mode, cum->gprs + 2);
12418 else if (n_gprs == 2)
12420 rtvec p = rtvec_alloc (2);
12422 RTVEC_ELT (p, 0)
12423 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
12424 const0_rtx);
12425 RTVEC_ELT (p, 1)
12426 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
12427 GEN_INT (4));
12429 return gen_rtx_PARALLEL (mode, p);
12433 /* After the real arguments, expand_call calls us once again
12434 with a void_type_node type. Whatever we return here is
12435 passed as operand 2 to the call expanders.
12437 We don't need this feature ... */
12438 else if (type == void_type_node)
12439 return const0_rtx;
12441 gcc_unreachable ();
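/* Illustrative sketch of the resulting register assignment (an example,
   not compiler code): for a call to

     void f (int a, double b, long c, long d);

   the arguments end up as a -> %r2, b -> %f0, c -> %r3, d -> %r4.
   Integer and pointer arguments consume %r2-%r6 in order, floating-point
   arguments consume the FP argument registers, vector arguments (with
   the VX ABI) consume %v24-%v31, and whatever does not fit is passed on
   the stack.  */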
12444 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Vector arguments are
12445 left-justified when placed on the stack during parameter passing. */
12447 static pad_direction
12448 s390_function_arg_padding (machine_mode mode, const_tree type)
12450 if (s390_function_arg_vector (mode, type))
12451 return PAD_UPWARD;
12453 return default_function_arg_padding (mode, type);
12456 /* Return true if return values of type TYPE should be returned
12457 in a memory buffer whose address is passed by the caller as
12458 hidden first argument. */
12460 static bool
12461 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
12463 /* We accept small integral (and similar) types. */
12464 if (INTEGRAL_TYPE_P (type)
12465 || POINTER_TYPE_P (type)
12466 || TREE_CODE (type) == OFFSET_TYPE
12467 || TREE_CODE (type) == REAL_TYPE)
12468 return int_size_in_bytes (type) > 8;
12470 /* vector types which fit into a VR. */
12471 if (TARGET_VX_ABI
12472 && VECTOR_TYPE_P (type)
12473 && int_size_in_bytes (type) <= 16)
12474 return false;
12476 /* Aggregates and similar constructs are always returned
12477 in memory. */
12478 if (AGGREGATE_TYPE_P (type)
12479 || TREE_CODE (type) == COMPLEX_TYPE
12480 || VECTOR_TYPE_P (type))
12481 return true;
12483 /* ??? We get called on all sorts of random stuff from
12484 aggregate_value_p. We can't abort, but it's not clear
12485 what's safe to return. Pretend it's a struct I guess. */
12486 return true;
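/* Example (illustrative only, not compiler code): scalar results such as
   long or double come back in registers, and so does a 16-byte vector
   under the VX ABI, but

     struct r { int a, b, c; };

   is returned in memory: the caller allocates the buffer and passes its
   address as a hidden first argument.  */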
12489 /* Function arguments and return values are promoted to word size. */
12491 static machine_mode
12492 s390_promote_function_mode (const_tree type, machine_mode mode,
12493 int *punsignedp,
12494 const_tree fntype ATTRIBUTE_UNUSED,
12495 int for_return ATTRIBUTE_UNUSED)
12497 if (INTEGRAL_MODE_P (mode)
12498 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
12500 if (type != NULL_TREE && POINTER_TYPE_P (type))
12501 *punsignedp = POINTERS_EXTEND_UNSIGNED;
12502 return Pmode;
12505 return mode;
12508 /* Define where to return a (scalar) value of type RET_TYPE.
12509 If RET_TYPE is null, define where to return a (scalar)
12510 value of mode MODE from a libcall. */
12512 static rtx
12513 s390_function_and_libcall_value (machine_mode mode,
12514 const_tree ret_type,
12515 const_tree fntype_or_decl,
12516 bool outgoing ATTRIBUTE_UNUSED)
12518 /* For vector return types it is important to use the RET_TYPE
12519 argument whenever available since the middle-end might have
12520 changed the mode to a scalar mode. */
12521 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
12522 || (!ret_type && VECTOR_MODE_P (mode)));
12524 /* For normal functions perform the promotion as
12525 promote_function_mode would do. */
12526 if (ret_type)
12528 int unsignedp = TYPE_UNSIGNED (ret_type);
12529 mode = promote_function_mode (ret_type, mode, &unsignedp,
12530 fntype_or_decl, 1);
12533 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
12534 || SCALAR_FLOAT_MODE_P (mode)
12535 || (TARGET_VX_ABI && vector_ret_type_p));
12536 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
12538 if (TARGET_VX_ABI && vector_ret_type_p)
12539 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
12540 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
12541 return gen_rtx_REG (mode, 16);
12542 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
12543 || UNITS_PER_LONG == UNITS_PER_WORD)
12544 return gen_rtx_REG (mode, 2);
12545 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
12547 /* This case is triggered when returning a 64 bit value with
12548 -m31 -mzarch. Although the value would fit into a single
12549 register it has to be forced into a 32 bit register pair in
12550 order to match the ABI. */
12551 rtvec p = rtvec_alloc (2);
12553 RTVEC_ELT (p, 0)
12554 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
12555 RTVEC_ELT (p, 1)
12556 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
12558 return gen_rtx_PARALLEL (mode, p);
12561 gcc_unreachable ();
12564 /* Define where to return a scalar return value of type RET_TYPE. */
12566 static rtx
12567 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
12568 bool outgoing)
12570 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
12571 fn_decl_or_type, outgoing);
12574 /* Define where to return a scalar libcall return value of mode
12575 MODE. */
12577 static rtx
12578 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
12580 return s390_function_and_libcall_value (mode, NULL_TREE,
12581 NULL_TREE, true);
12585 /* Create and return the va_list datatype.
12587 On S/390, va_list is an array type equivalent to
12589 typedef struct __va_list_tag
12591 long __gpr;
12592 long __fpr;
12593 void *__overflow_arg_area;
12594 void *__reg_save_area;
12595 } va_list[1];
12597 where __gpr and __fpr hold the number of general purpose
12598 or floating point arguments used up to now, respectively,
12599 __overflow_arg_area points to the stack location of the
12600 next argument passed on the stack, and __reg_save_area
12601 always points to the start of the register area in the
12602 call frame of the current function. The function prologue
12603 saves all registers used for argument passing into this
12604 area if the function uses variable arguments. */
12606 static tree
12607 s390_build_builtin_va_list (void)
12609 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12611 record = lang_hooks.types.make_type (RECORD_TYPE);
12613 type_decl =
12614 build_decl (BUILTINS_LOCATION,
12615 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12617 f_gpr = build_decl (BUILTINS_LOCATION,
12618 FIELD_DECL, get_identifier ("__gpr"),
12619 long_integer_type_node);
12620 f_fpr = build_decl (BUILTINS_LOCATION,
12621 FIELD_DECL, get_identifier ("__fpr"),
12622 long_integer_type_node);
12623 f_ovf = build_decl (BUILTINS_LOCATION,
12624 FIELD_DECL, get_identifier ("__overflow_arg_area"),
12625 ptr_type_node);
12626 f_sav = build_decl (BUILTINS_LOCATION,
12627 FIELD_DECL, get_identifier ("__reg_save_area"),
12628 ptr_type_node);
12630 va_list_gpr_counter_field = f_gpr;
12631 va_list_fpr_counter_field = f_fpr;
12633 DECL_FIELD_CONTEXT (f_gpr) = record;
12634 DECL_FIELD_CONTEXT (f_fpr) = record;
12635 DECL_FIELD_CONTEXT (f_ovf) = record;
12636 DECL_FIELD_CONTEXT (f_sav) = record;
12638 TYPE_STUB_DECL (record) = type_decl;
12639 TYPE_NAME (record) = type_decl;
12640 TYPE_FIELDS (record) = f_gpr;
12641 DECL_CHAIN (f_gpr) = f_fpr;
12642 DECL_CHAIN (f_fpr) = f_ovf;
12643 DECL_CHAIN (f_ovf) = f_sav;
12645 layout_type (record);
12647 /* The correct type is an array type of one element. */
12648 return build_array_type (record, build_index_type (size_zero_node));
12651 /* Implement va_start by filling the va_list structure VALIST.
12652 STDARG_P is always true, and ignored.
12653 NEXTARG points to the first anonymous stack argument.
12655 The following global variables are used to initialize
12656 the va_list structure:
12658 crtl->args.info:
12659 holds number of gprs and fprs used for named arguments.
12660 crtl->args.arg_offset_rtx:
12661 holds the offset of the first anonymous stack argument
12662 (relative to the virtual arg pointer). */
12664 static void
12665 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12667 HOST_WIDE_INT n_gpr, n_fpr;
12668 int off;
12669 tree f_gpr, f_fpr, f_ovf, f_sav;
12670 tree gpr, fpr, ovf, sav, t;
12672 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12673 f_fpr = DECL_CHAIN (f_gpr);
12674 f_ovf = DECL_CHAIN (f_fpr);
12675 f_sav = DECL_CHAIN (f_ovf);
12677 valist = build_simple_mem_ref (valist);
12678 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12679 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12680 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12681 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12683 /* Count number of gp and fp argument registers used. */
12685 n_gpr = crtl->args.info.gprs;
12686 n_fpr = crtl->args.info.fprs;
12688 if (cfun->va_list_gpr_size)
12690 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12691 build_int_cst (NULL_TREE, n_gpr));
12692 TREE_SIDE_EFFECTS (t) = 1;
12693 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12696 if (cfun->va_list_fpr_size)
12698 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12699 build_int_cst (NULL_TREE, n_fpr));
12700 TREE_SIDE_EFFECTS (t) = 1;
12701 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12704 if (flag_split_stack
12705 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12706 == NULL)
12707 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12709 rtx reg;
12710 rtx_insn *seq;
12712 reg = gen_reg_rtx (Pmode);
12713 cfun->machine->split_stack_varargs_pointer = reg;
12715 start_sequence ();
12716 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12717 seq = get_insns ();
12718 end_sequence ();
12720 push_topmost_sequence ();
12721 emit_insn_after (seq, entry_of_function ());
12722 pop_topmost_sequence ();
12725 /* Find the overflow area.
12726 FIXME: This currently is too pessimistic when the vector ABI is
12727 enabled. In that case we *always* set up the overflow area
12728 pointer. */
12729 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12730 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12731 || TARGET_VX_ABI)
12733 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12734 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12735 else
12736 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12738 off = INTVAL (crtl->args.arg_offset_rtx);
12739 off = off < 0 ? 0 : off;
12740 if (TARGET_DEBUG_ARG)
12741 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12742 (int)n_gpr, (int)n_fpr, off);
12744 t = fold_build_pointer_plus_hwi (t, off);
12746 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12747 TREE_SIDE_EFFECTS (t) = 1;
12748 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12751 /* Find the register save area. */
12752 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12753 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12755 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12756 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12758 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12759 TREE_SIDE_EFFECTS (t) = 1;
12760 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
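/* Illustrative sketch (an example, not compiler code): for

     void g (int a, ...);

   va_start records __gpr = 1 and __fpr = 0 (one named GPR argument, no
   named FPR argument), points __overflow_arg_area at the first anonymous
   stack argument whenever the overflow area may be needed, and points
   __reg_save_area at the area in which the prologue dumped the argument
   registers.  */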
12764 /* Implement va_arg by updating the va_list structure
12765 VALIST as required to retrieve an argument of type
12766 TYPE, and returning that argument.
12768 Generates code equivalent to:
12770 if (integral value) {
12771 if (size <= 4 && args.gpr < 5 ||
12772 size > 4 && args.gpr < 4 )
12773 ret = args.reg_save_area[args.gpr+8]
12774 else
12775 ret = *args.overflow_arg_area++;
12776 } else if (vector value) {
12777 ret = *args.overflow_arg_area;
12778 args.overflow_arg_area += size / 8;
12779 } else if (float value) {
12780 if (args.fpr < 2)
12781 ret = args.reg_save_area[args.fpr+64]
12782 else
12783 ret = *args.overflow_arg_area++;
12784 } else if (aggregate value) {
12785 if (args.gpr < 5)
12786 ret = *args.reg_save_area[args.gpr]
12787 else
12788 ret = **args.overflow_arg_area++;
12789 } */
12791 static tree
12792 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12793 gimple_seq *post_p ATTRIBUTE_UNUSED)
12795 tree f_gpr, f_fpr, f_ovf, f_sav;
12796 tree gpr, fpr, ovf, sav, reg, t, u;
12797 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12798 tree lab_false, lab_over = NULL_TREE;
12799 tree addr = create_tmp_var (ptr_type_node, "addr");
12800 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12801 a stack slot. */
12803 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12804 f_fpr = DECL_CHAIN (f_gpr);
12805 f_ovf = DECL_CHAIN (f_fpr);
12806 f_sav = DECL_CHAIN (f_ovf);
12808 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12809 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12810 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12812 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12813 both appear on a lhs. */
12814 valist = unshare_expr (valist);
12815 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12817 size = int_size_in_bytes (type);
12819 s390_check_type_for_vector_abi (type, true, false);
12821 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12823 if (TARGET_DEBUG_ARG)
12825 fprintf (stderr, "va_arg: aggregate type");
12826 debug_tree (type);
12829 /* Aggregates are passed by reference. */
12830 indirect_p = 1;
12831 reg = gpr;
12832 n_reg = 1;
12834 /* Kernel stack layout on 31 bit: It is assumed here that no padding
12835 will be added by s390_frame_info because for va_args an even
12836 number of gprs always has to be saved (r15-r2 = 14 regs). */
12837 sav_ofs = 2 * UNITS_PER_LONG;
12838 sav_scale = UNITS_PER_LONG;
12839 size = UNITS_PER_LONG;
12840 max_reg = GP_ARG_NUM_REG - n_reg;
12841 left_align_p = false;
12843 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12845 if (TARGET_DEBUG_ARG)
12847 fprintf (stderr, "va_arg: vector type");
12848 debug_tree (type);
12851 indirect_p = 0;
12852 reg = NULL_TREE;
12853 n_reg = 0;
12854 sav_ofs = 0;
12855 sav_scale = 8;
12856 max_reg = 0;
12857 left_align_p = true;
12859 else if (s390_function_arg_float (TYPE_MODE (type), type))
12861 if (TARGET_DEBUG_ARG)
12863 fprintf (stderr, "va_arg: float type");
12864 debug_tree (type);
12867 /* FP args go in FP registers, if present. */
12868 indirect_p = 0;
12869 reg = fpr;
12870 n_reg = 1;
12871 sav_ofs = 16 * UNITS_PER_LONG;
12872 sav_scale = 8;
12873 max_reg = FP_ARG_NUM_REG - n_reg;
12874 left_align_p = false;
12876 else
12878 if (TARGET_DEBUG_ARG)
12880 fprintf (stderr, "va_arg: other type");
12881 debug_tree (type);
12884 /* Otherwise into GP registers. */
12885 indirect_p = 0;
12886 reg = gpr;
12887 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12889 /* Kernel stack layout on 31 bit: It is assumed here that no padding
12890 will be added by s390_frame_info because for va_args an even
12891 number of gprs always has to be saved (r15-r2 = 14 regs). */
12892 sav_ofs = 2 * UNITS_PER_LONG;
12894 if (size < UNITS_PER_LONG)
12895 sav_ofs += UNITS_PER_LONG - size;
12897 sav_scale = UNITS_PER_LONG;
12898 max_reg = GP_ARG_NUM_REG - n_reg;
12899 left_align_p = false;
12902 /* Pull the value out of the saved registers ... */
12904 if (reg != NULL_TREE)
/*
12907 if (reg > ((typeof (reg))max_reg))
12908 goto lab_false;
12910 addr = sav + sav_ofs + reg * save_scale;
12912 goto lab_over;
12914 lab_false:
*/
12917 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12918 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12920 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12921 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12922 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12923 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12924 gimplify_and_add (t, pre_p);
12926 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12927 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12928 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12929 t = fold_build_pointer_plus (t, u);
12931 gimplify_assign (addr, t, pre_p);
12933 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12935 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12938 /* ... Otherwise out of the overflow area. */
12940 t = ovf;
12941 if (size < UNITS_PER_LONG && !left_align_p)
12942 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12944 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12946 gimplify_assign (addr, t, pre_p);
12948 if (size < UNITS_PER_LONG && left_align_p)
12949 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12950 else
12951 t = fold_build_pointer_plus_hwi (t, size);
12953 gimplify_assign (ovf, t, pre_p);
12955 if (reg != NULL_TREE)
12956 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12959 /* Increment register save count. */
12961 if (n_reg > 0)
12963 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12964 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12965 gimplify_and_add (u, pre_p);
12968 if (indirect_p)
12970 t = build_pointer_type_for_mode (build_pointer_type (type),
12971 ptr_mode, true);
12972 addr = fold_convert (t, addr);
12973 addr = build_va_arg_indirect_ref (addr);
12975 else
12977 t = build_pointer_type_for_mode (type, ptr_mode, true);
12978 addr = fold_convert (t, addr);
12981 return build_va_arg_indirect_ref (addr);
12984 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12985 expanders.
12986 DEST - Register location where CC will be stored.
12987 TDB - Pointer to a 256 byte area in which to store the transaction
12988 diagnostic block. NULL if TDB is not needed.
12989 RETRY - Retry count value. If non-NULL, a retry loop for CC2
12990 is emitted.
12991 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12992 of the tbegin instruction pattern. */
12994 void
12995 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12997 rtx retry_plus_two = gen_reg_rtx (SImode);
12998 rtx retry_reg = gen_reg_rtx (SImode);
12999 rtx_code_label *retry_label = NULL;
13001 if (retry != NULL_RTX)
13003 emit_move_insn (retry_reg, retry);
13004 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
13005 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
13006 retry_label = gen_label_rtx ();
13007 emit_label (retry_label);
13010 if (clobber_fprs_p)
13012 if (TARGET_VX)
13013 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
13014 tdb));
13015 else
13016 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
13017 tdb));
13019 else
13020 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
13021 tdb));
13023 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
13024 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
13025 CC_REGNUM)),
13026 UNSPEC_CC_TO_INT));
13027 if (retry != NULL_RTX)
13029 const int CC0 = 1 << 3;
13030 const int CC1 = 1 << 2;
13031 const int CC3 = 1 << 0;
13032 rtx jump;
13033 rtx count = gen_reg_rtx (SImode);
13034 rtx_code_label *leave_label = gen_label_rtx ();
13036 /* Exit for success and permanent failures. */
13037 jump = s390_emit_jump (leave_label,
13038 gen_rtx_EQ (VOIDmode,
13039 gen_rtx_REG (CCRAWmode, CC_REGNUM),
13040 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
13041 LABEL_NUSES (leave_label) = 1;
13043 /* CC2 - transient failure. Perform retry with ppa. */
13044 emit_move_insn (count, retry_plus_two);
13045 emit_insn (gen_subsi3 (count, count, retry_reg));
13046 emit_insn (gen_tx_assist (count));
13047 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
13048 retry_reg,
13049 retry_reg));
13050 JUMP_LABEL (jump) = retry_label;
13051 LABEL_NUSES (retry_label) = 1;
13052 emit_label (leave_label);
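/* Usage sketch (illustrative; relies on the documented s390 HTM builtins
   and htmintrin.h rather than on anything defined in this file):

     #include <htmintrin.h>

     struct __htm_tdb tdb;
     if (__builtin_tbegin (&tdb) == 0)   // CC 0: transaction started
       {
         ...transactional code...
         __builtin_tend ();
       }
     else
       ...fallback path, e.g. take a lock...

   The retry variant expanded above additionally loops on CC2 (transient
   failure), issuing a ppa (tx_assist) hint before each retry.  */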
13057 /* Return the decl for the target specific builtin with the function
13058 code FCODE. */
13060 static tree
13061 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
13063 if (fcode >= S390_BUILTIN_MAX)
13064 return error_mark_node;
13066 return s390_builtin_decls[fcode];
13069 /* We call mcount before the function prologue. So a profiled leaf
13070 function should stay a leaf function. */
13072 static bool
13073 s390_keep_leaf_when_profiled ()
13075 return true;
13078 /* Output assembly code for the trampoline template to
13079 stdio stream FILE.
13081 On S/390, we use gpr 1 internally in the trampoline code;
13082 gpr 0 is used to hold the static chain. */
13084 static void
13085 s390_asm_trampoline_template (FILE *file)
13087 rtx op[2];
13088 op[0] = gen_rtx_REG (Pmode, 0);
13089 op[1] = gen_rtx_REG (Pmode, 1);
13091 if (TARGET_64BIT)
13093 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13094 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
13095 output_asm_insn ("br\t%1", op); /* 2 byte */
13096 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
13098 else
13100 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13101 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
13102 output_asm_insn ("br\t%1", op); /* 2 byte */
13103 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
13107 /* Emit RTL insns to initialize the variable parts of a trampoline.
13108 FNADDR is an RTX for the address of the function's pure code.
13109 CXT is an RTX for the static chain value for the function. */
13111 static void
13112 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
13114 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
13115 rtx mem;
13117 emit_block_move (m_tramp, assemble_trampoline_template (),
13118 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
13120 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
13121 emit_move_insn (mem, cxt);
13122 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
13123 emit_move_insn (mem, fnaddr);
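/* Illustrative layout sketch (derived from the template and the init
   code above, 64-bit case; an explanatory comment, not additional
   functionality):

     offset  0   basr %r1,0 ; lmg %r0,%r1,14(%r1) ; br %r1   (code)
     offset 16   static chain value      -> loaded into %r0
     offset 24   target function address -> loaded into %r1

   The lmg displacement of 14 plus the value of %r1 (which points just
   past the 2-byte basr) addresses offset 16, so both operands are
   fetched relative to the trampoline itself.  */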
13126 /* Output assembler code to FILE to increment profiler label # LABELNO
13127 for profiling a function entry. */
13129 void
13130 s390_function_profiler (FILE *file, int labelno)
13132 rtx op[7];
13134 char label[128];
13135 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
13137 fprintf (file, "# function profiler \n");
13139 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
13140 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
13141 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
13143 op[2] = gen_rtx_REG (Pmode, 1);
13144 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
13145 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
13147 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
13148 if (flag_pic)
13150 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
13151 op[4] = gen_rtx_CONST (Pmode, op[4]);
13154 if (TARGET_64BIT)
13156 output_asm_insn ("stg\t%0,%1", op);
13157 output_asm_insn ("larl\t%2,%3", op);
13158 output_asm_insn ("brasl\t%0,%4", op);
13159 output_asm_insn ("lg\t%0,%1", op);
13161 else if (TARGET_CPU_ZARCH)
13163 output_asm_insn ("st\t%0,%1", op);
13164 output_asm_insn ("larl\t%2,%3", op);
13165 output_asm_insn ("brasl\t%0,%4", op);
13166 output_asm_insn ("l\t%0,%1", op);
13168 else if (!flag_pic)
13170 op[6] = gen_label_rtx ();
13172 output_asm_insn ("st\t%0,%1", op);
13173 output_asm_insn ("bras\t%2,%l6", op);
13174 output_asm_insn (".long\t%4", op);
13175 output_asm_insn (".long\t%3", op);
13176 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
13177 output_asm_insn ("l\t%0,0(%2)", op);
13178 output_asm_insn ("l\t%2,4(%2)", op);
13179 output_asm_insn ("basr\t%0,%0", op);
13180 output_asm_insn ("l\t%0,%1", op);
13182 else
13184 op[5] = gen_label_rtx ();
13185 op[6] = gen_label_rtx ();
13187 output_asm_insn ("st\t%0,%1", op);
13188 output_asm_insn ("bras\t%2,%l6", op);
13189 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
13190 output_asm_insn (".long\t%4-%l5", op);
13191 output_asm_insn (".long\t%3-%l5", op);
13192 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
13193 output_asm_insn ("lr\t%0,%2", op);
13194 output_asm_insn ("a\t%0,0(%2)", op);
13195 output_asm_insn ("a\t%2,4(%2)", op);
13196 output_asm_insn ("basr\t%0,%0", op);
13197 output_asm_insn ("l\t%0,%1", op);
13201 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
13202 into its SYMBOL_REF_FLAGS. */
13204 static void
13205 s390_encode_section_info (tree decl, rtx rtl, int first)
13207 default_encode_section_info (decl, rtl, first);
13209 if (TREE_CODE (decl) == VAR_DECL)
13211 /* Store the alignment to be able to check if we can use
13212 a larl/load-relative instruction. We only handle the cases
13213 that can go wrong (i.e. no FUNC_DECLs). */
13214 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
13215 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
13216 else if (DECL_ALIGN (decl) % 32)
13217 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13218 else if (DECL_ALIGN (decl) % 64)
13219 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
13222 /* Literal pool references don't have a decl so they are handled
13223 differently here. We rely on the information in the MEM_ALIGN
13224 entry to decide upon the alignment. */
13225 if (MEM_P (rtl)
13226 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
13227 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
13229 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
13230 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
13231 else if (MEM_ALIGN (rtl) % 32)
13232 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13233 else if (MEM_ALIGN (rtl) % 64)
13234 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
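/* Example (illustrative only, not compiler code): a definition such as

     char buf[10] __attribute__ ((aligned (1)));

   may end up with an alignment below 2 bytes, so NOTALIGN2 is recorded
   and larl/load-relative addressing (which needs even addresses) is not
   used for it, while an 8-byte aligned symbol gets none of the NOTALIGN
   flags set.  */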
13238 /* Output thunk to FILE that implements a C++ virtual function call (with
13239 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
13240 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
13241 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
13242 relative to the resulting this pointer. */
13244 static void
13245 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13246 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
13247 tree function)
13249 rtx op[10];
13250 int nonlocal = 0;
13252 /* Make sure unwind info is emitted for the thunk if needed. */
13253 final_start_function (emit_barrier (), file, 1);
13255 /* Operand 0 is the target function. */
13256 op[0] = XEXP (DECL_RTL (function), 0);
13257 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
13259 nonlocal = 1;
13260 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
13261 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
13262 op[0] = gen_rtx_CONST (Pmode, op[0]);
13265 /* Operand 1 is the 'this' pointer. */
13266 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
13267 op[1] = gen_rtx_REG (Pmode, 3);
13268 else
13269 op[1] = gen_rtx_REG (Pmode, 2);
13271 /* Operand 2 is the delta. */
13272 op[2] = GEN_INT (delta);
13274 /* Operand 3 is the vcall_offset. */
13275 op[3] = GEN_INT (vcall_offset);
13277 /* Operand 4 is the temporary register. */
13278 op[4] = gen_rtx_REG (Pmode, 1);
13280 /* Operands 5 to 8 can be used as labels. */
13281 op[5] = NULL_RTX;
13282 op[6] = NULL_RTX;
13283 op[7] = NULL_RTX;
13284 op[8] = NULL_RTX;
13286 /* Operand 9 can be used for temporary register. */
13287 op[9] = NULL_RTX;
13289 /* Generate code. */
13290 if (TARGET_64BIT)
13292 /* Setup literal pool pointer if required. */
13293 if ((!DISP_IN_RANGE (delta)
13294 && !CONST_OK_FOR_K (delta)
13295 && !CONST_OK_FOR_Os (delta))
13296 || (!DISP_IN_RANGE (vcall_offset)
13297 && !CONST_OK_FOR_K (vcall_offset)
13298 && !CONST_OK_FOR_Os (vcall_offset)))
13300 op[5] = gen_label_rtx ();
13301 output_asm_insn ("larl\t%4,%5", op);
13304 /* Add DELTA to this pointer. */
13305 if (delta)
13307 if (CONST_OK_FOR_J (delta))
13308 output_asm_insn ("la\t%1,%2(%1)", op);
13309 else if (DISP_IN_RANGE (delta))
13310 output_asm_insn ("lay\t%1,%2(%1)", op);
13311 else if (CONST_OK_FOR_K (delta))
13312 output_asm_insn ("aghi\t%1,%2", op);
13313 else if (CONST_OK_FOR_Os (delta))
13314 output_asm_insn ("agfi\t%1,%2", op);
13315 else
13317 op[6] = gen_label_rtx ();
13318 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
13322 /* Perform vcall adjustment. */
13323 if (vcall_offset)
13325 if (DISP_IN_RANGE (vcall_offset))
13327 output_asm_insn ("lg\t%4,0(%1)", op);
13328 output_asm_insn ("ag\t%1,%3(%4)", op);
13330 else if (CONST_OK_FOR_K (vcall_offset))
13332 output_asm_insn ("lghi\t%4,%3", op);
13333 output_asm_insn ("ag\t%4,0(%1)", op);
13334 output_asm_insn ("ag\t%1,0(%4)", op);
13336 else if (CONST_OK_FOR_Os (vcall_offset))
13338 output_asm_insn ("lgfi\t%4,%3", op);
13339 output_asm_insn ("ag\t%4,0(%1)", op);
13340 output_asm_insn ("ag\t%1,0(%4)", op);
13342 else
13344 op[7] = gen_label_rtx ();
13345 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
13346 output_asm_insn ("ag\t%4,0(%1)", op);
13347 output_asm_insn ("ag\t%1,0(%4)", op);
13351 /* Jump to target. */
13352 output_asm_insn ("jg\t%0", op);
13354 /* Output literal pool if required. */
13355 if (op[5])
13357 output_asm_insn (".align\t4", op);
13358 targetm.asm_out.internal_label (file, "L",
13359 CODE_LABEL_NUMBER (op[5]));
13361 if (op[6])
13363 targetm.asm_out.internal_label (file, "L",
13364 CODE_LABEL_NUMBER (op[6]));
13365 output_asm_insn (".long\t%2", op);
13367 if (op[7])
13369 targetm.asm_out.internal_label (file, "L",
13370 CODE_LABEL_NUMBER (op[7]));
13371 output_asm_insn (".long\t%3", op);
13374 else
13376 /* Setup base pointer if required. */
13377 if (!vcall_offset
13378 || (!DISP_IN_RANGE (delta)
13379 && !CONST_OK_FOR_K (delta)
13380 && !CONST_OK_FOR_Os (delta))
13381 || (!DISP_IN_RANGE (delta)
13382 && !CONST_OK_FOR_K (vcall_offset)
13383 && !CONST_OK_FOR_Os (vcall_offset)))
13385 op[5] = gen_label_rtx ();
13386 output_asm_insn ("basr\t%4,0", op);
13387 targetm.asm_out.internal_label (file, "L",
13388 CODE_LABEL_NUMBER (op[5]));
13391 /* Add DELTA to this pointer. */
13392 if (delta)
13394 if (CONST_OK_FOR_J (delta))
13395 output_asm_insn ("la\t%1,%2(%1)", op);
13396 else if (DISP_IN_RANGE (delta))
13397 output_asm_insn ("lay\t%1,%2(%1)", op);
13398 else if (CONST_OK_FOR_K (delta))
13399 output_asm_insn ("ahi\t%1,%2", op);
13400 else if (CONST_OK_FOR_Os (delta))
13401 output_asm_insn ("afi\t%1,%2", op);
13402 else
13404 op[6] = gen_label_rtx ();
13405 output_asm_insn ("a\t%1,%6-%5(%4)", op);
13409 /* Perform vcall adjustment. */
13410 if (vcall_offset)
13412 if (CONST_OK_FOR_J (vcall_offset))
13414 output_asm_insn ("l\t%4,0(%1)", op);
13415 output_asm_insn ("a\t%1,%3(%4)", op);
13417 else if (DISP_IN_RANGE (vcall_offset))
13419 output_asm_insn ("l\t%4,0(%1)", op);
13420 output_asm_insn ("ay\t%1,%3(%4)", op);
13422 else if (CONST_OK_FOR_K (vcall_offset))
13424 output_asm_insn ("lhi\t%4,%3", op);
13425 output_asm_insn ("a\t%4,0(%1)", op);
13426 output_asm_insn ("a\t%1,0(%4)", op);
13428 else if (CONST_OK_FOR_Os (vcall_offset))
13430 output_asm_insn ("iilf\t%4,%3", op);
13431 output_asm_insn ("a\t%4,0(%1)", op);
13432 output_asm_insn ("a\t%1,0(%4)", op);
13434 else
13436 op[7] = gen_label_rtx ();
13437 output_asm_insn ("l\t%4,%7-%5(%4)", op);
13438 output_asm_insn ("a\t%4,0(%1)", op);
13439 output_asm_insn ("a\t%1,0(%4)", op);
13442 /* We had to clobber the base pointer register.
13443 Re-setup the base pointer (with a different base). */
13444 op[5] = gen_label_rtx ();
13445 output_asm_insn ("basr\t%4,0", op);
13446 targetm.asm_out.internal_label (file, "L",
13447 CODE_LABEL_NUMBER (op[5]));
13450 /* Jump to target. */
13451 op[8] = gen_label_rtx ();
13453 if (!flag_pic)
13454 output_asm_insn ("l\t%4,%8-%5(%4)", op);
13455 else if (!nonlocal)
13456 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13457 /* We cannot call through .plt, since .plt requires %r12 loaded. */
13458 else if (flag_pic == 1)
13460 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13461 output_asm_insn ("l\t%4,%0(%4)", op);
13463 else if (flag_pic == 2)
13465 op[9] = gen_rtx_REG (Pmode, 0);
13466 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
13467 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13468 output_asm_insn ("ar\t%4,%9", op);
13469 output_asm_insn ("l\t%4,0(%4)", op);
13472 output_asm_insn ("br\t%4", op);
13474 /* Output literal pool. */
13475 output_asm_insn (".align\t4", op);
13477 if (nonlocal && flag_pic == 2)
13478 output_asm_insn (".long\t%0", op);
13479 if (nonlocal)
13481 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
13482 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
13485 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
13486 if (!flag_pic)
13487 output_asm_insn (".long\t%0", op);
13488 else
13489 output_asm_insn (".long\t%0-%5", op);
13491 if (op[6])
13493 targetm.asm_out.internal_label (file, "L",
13494 CODE_LABEL_NUMBER (op[6]));
13495 output_asm_insn (".long\t%2", op);
13497 if (op[7])
13499 targetm.asm_out.internal_label (file, "L",
13500 CODE_LABEL_NUMBER (op[7]));
13501 output_asm_insn (".long\t%3", op);
13504 final_end_function ();
13507 /* Output either an indirect jump or an indirect call
13508 (RETURN_ADDR_REGNO != INVALID_REGNUM) with target register REGNO
13509 using a branch trampoline disabling branch target prediction. */
13511 void
13512 s390_indirect_branch_via_thunk (unsigned int regno,
13513 unsigned int return_addr_regno,
13514 rtx comparison_operator,
13515 enum s390_indirect_branch_type type)
13517 enum s390_indirect_branch_option option;
13519 if (type == s390_indirect_branch_type_return)
13521 if (s390_return_addr_from_memory ())
13522 option = s390_opt_function_return_mem;
13523 else
13524 option = s390_opt_function_return_reg;
13526 else if (type == s390_indirect_branch_type_jump)
13527 option = s390_opt_indirect_branch_jump;
13528 else if (type == s390_indirect_branch_type_call)
13529 option = s390_opt_indirect_branch_call;
13530 else
13531 gcc_unreachable ();
13533 if (TARGET_INDIRECT_BRANCH_TABLE)
13535 char label[32];
13537 ASM_GENERATE_INTERNAL_LABEL (label,
13538 indirect_branch_table_label[option],
13539 indirect_branch_table_label_no[option]++);
13540 ASM_OUTPUT_LABEL (asm_out_file, label);
13543 if (return_addr_regno != INVALID_REGNUM)
13545 gcc_assert (comparison_operator == NULL_RTX);
13546 fprintf (asm_out_file, " \tbrasl\t%%r%d,", return_addr_regno);
13548 else
13550 fputs (" \tjg", asm_out_file);
13551 if (comparison_operator != NULL_RTX)
13552 print_operand (asm_out_file, comparison_operator, 'C');
13554 fputs ("\t", asm_out_file);
13557 if (TARGET_CPU_Z10)
13558 fprintf (asm_out_file,
13559 TARGET_INDIRECT_BRANCH_THUNK_NAME_EXRL "\n",
13560 regno);
13561 else
13562 fprintf (asm_out_file,
13563 TARGET_INDIRECT_BRANCH_THUNK_NAME_EX "\n",
13564 INDIRECT_BRANCH_THUNK_REGNUM, regno);
13566 if ((option == s390_opt_indirect_branch_jump
13567 && cfun->machine->indirect_branch_jump == indirect_branch_thunk)
13568 || (option == s390_opt_indirect_branch_call
13569 && cfun->machine->indirect_branch_call == indirect_branch_thunk)
13570 || (option == s390_opt_function_return_reg
13571 && cfun->machine->function_return_reg == indirect_branch_thunk)
13572 || (option == s390_opt_function_return_mem
13573 && cfun->machine->function_return_mem == indirect_branch_thunk))
13575 if (TARGET_CPU_Z10)
13576 indirect_branch_z10thunk_mask |= (1 << regno);
13577 else
13578 indirect_branch_prez10thunk_mask |= (1 << regno);
13582 /* Output an inline thunk for indirect jumps. EXECUTE_TARGET can
13583 either be an address register or a label pointing to the location
13584 of the jump instruction. */
13586 void
13587 s390_indirect_branch_via_inline_thunk (rtx execute_target)
13589 if (TARGET_INDIRECT_BRANCH_TABLE)
13591 char label[32];
13593 ASM_GENERATE_INTERNAL_LABEL (label,
13594 indirect_branch_table_label[s390_opt_indirect_branch_jump],
13595 indirect_branch_table_label_no[s390_opt_indirect_branch_jump]++);
13596 ASM_OUTPUT_LABEL (asm_out_file, label);
13599 if (!TARGET_ZARCH)
13600 fputs ("\t.machinemode zarch\n", asm_out_file);
13602 if (REG_P (execute_target))
13603 fprintf (asm_out_file, "\tex\t%%r0,0(%%r%d)\n", REGNO (execute_target));
13604 else
13605 output_asm_insn ("\texrl\t%%r0,%0", &execute_target);
13607 if (!TARGET_ZARCH)
13608 fputs ("\t.machinemode esa\n", asm_out_file);
13610 fputs ("0:\tj\t0b\n", asm_out_file);
13613 static bool
13614 s390_valid_pointer_mode (scalar_int_mode mode)
13616 return (mode == SImode || (TARGET_64BIT && mode == DImode));
13619 /* Checks whether the given CALL_EXPR would use a call-saved
13620 register to pass an argument. This is used to decide whether sibling call
13621 optimization could be performed on the respective function
13622 call. */
13624 static bool
13625 s390_call_saved_register_used (tree call_expr)
13627 CUMULATIVE_ARGS cum_v;
13628 cumulative_args_t cum;
13629 tree parameter;
13630 machine_mode mode;
13631 tree type;
13632 rtx parm_rtx;
13633 int reg, i;
13635 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
13636 cum = pack_cumulative_args (&cum_v);
13638 for (i = 0; i < call_expr_nargs (call_expr); i++)
13640 parameter = CALL_EXPR_ARG (call_expr, i);
13641 gcc_assert (parameter);
13643 /* For an undeclared variable passed as parameter we will get
13644 an ERROR_MARK node here. */
13645 if (TREE_CODE (parameter) == ERROR_MARK)
13646 return true;
13648 type = TREE_TYPE (parameter);
13649 gcc_assert (type);
13651 mode = TYPE_MODE (type);
13652 gcc_assert (mode);
13654 /* We assume that in the target function all parameters are
13655 named. This only has an impact on vector argument register
13656 usage, none of which is call-saved. */
13657 if (pass_by_reference (&cum_v, mode, type, true))
13659 mode = Pmode;
13660 type = build_pointer_type (type);
13663 parm_rtx = s390_function_arg (cum, mode, type, true);
13665 s390_function_arg_advance (cum, mode, type, true);
13667 if (!parm_rtx)
13668 continue;
13670 if (REG_P (parm_rtx))
13672 for (reg = 0; reg < REG_NREGS (parm_rtx); reg++)
13673 if (!call_used_regs[reg + REGNO (parm_rtx)])
13674 return true;
13677 if (GET_CODE (parm_rtx) == PARALLEL)
13679 int i;
13681 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
13683 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
13685 gcc_assert (REG_P (r));
13687 for (reg = 0; reg < REG_NREGS (r); reg++)
13688 if (!call_used_regs[reg + REGNO (r)])
13689 return true;
13694 return false;
13697 /* Return true if the given call expression can be
13698 turned into a sibling call.
13699 DECL holds the declaration of the function to be called whereas
13700 EXP is the call expression itself. */
13702 static bool
13703 s390_function_ok_for_sibcall (tree decl, tree exp)
13705 /* The TPF epilogue uses register 1. */
13706 if (TARGET_TPF_PROFILING)
13707 return false;
13709 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13710 which would have to be restored before the sibcall. */
13711 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
13712 return false;
13714 /* The thunks for indirect branches require r1 if no exrl is
13715 available. r1 might not be available when doing a sibling
13716 call. */
13717 if (TARGET_INDIRECT_BRANCH_NOBP_CALL
13718 && !TARGET_CPU_Z10
13719 && !decl)
13720 return false;
13722 /* Register 6 on s390 is available as an argument register but is
13723 call-saved. This makes functions needing this register for arguments
13724 not suitable for sibcalls. */
13725 return !s390_call_saved_register_used (exp);
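/* Example (illustrative only, not compiler code): a call such as

     extern long callee (long, long, long, long, long);
     long caller (void) { return callee (1, 2, 3, 4, 5); }

   needs %r6 for the fifth argument. Since %r6 is call-saved, the caller
   could not both restore it for its own caller and use it to pass the
   argument, so the call is emitted as a normal call rather than a
   sibcall.  */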
13728 /* Return the fixed registers used for condition codes. */
13730 static bool
13731 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13733 *p1 = CC_REGNUM;
13734 *p2 = INVALID_REGNUM;
13736 return true;
13739 /* This function is used by the call expanders of the machine description.
13740 It emits the call insn itself together with the necessary operations
13741 to adjust the target address and returns the emitted insn.
13742 ADDR_LOCATION is the target address rtx
13743 TLS_CALL the location of the thread-local symbol
13744 RESULT_REG the register where the result of the call should be stored
13745 RETADDR_REG the register where the return address should be stored
13746 If this parameter is NULL_RTX the call is considered
13747 to be a sibling call. */
13749 rtx_insn *
13750 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13751 rtx retaddr_reg)
13753 bool plt_call = false;
13754 rtx_insn *insn;
13755 rtx vec[4] = { NULL_RTX };
13756 int elts = 0;
13757 rtx *call = &vec[0];
13758 rtx *clobber_ret_reg = &vec[1];
13759 rtx *use = &vec[2];
13760 rtx *clobber_thunk_reg = &vec[3];
13761 int i;
13763 /* Direct function calls need special treatment. */
13764 if (GET_CODE (addr_location) == SYMBOL_REF)
13766 /* When calling a global routine in PIC mode, we must
13767 replace the symbol itself with the PLT stub. */
13768 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13770 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13772 addr_location = gen_rtx_UNSPEC (Pmode,
13773 gen_rtvec (1, addr_location),
13774 UNSPEC_PLT);
13775 addr_location = gen_rtx_CONST (Pmode, addr_location);
13776 plt_call = true;
13778 else
13779 /* For -fpic code the PLT entries might use r12 which is
13780 call-saved. Therefore we cannot do a sibcall when
13781 calling directly using a symbol ref. When reaching
13782 this point we decided (in s390_function_ok_for_sibcall)
13783 to do a sibcall for a function pointer but one of the
13784 optimizers was able to get rid of the function pointer
13785 by propagating the symbol ref into the call. This
13786 optimization is illegal for S/390 so we turn the direct
13787 call into an indirect call again. */
13788 addr_location = force_reg (Pmode, addr_location);
13791 /* Unless we can use the bras(l) insn, force the
13792 routine address into a register. */
13793 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13795 if (flag_pic)
13796 addr_location = legitimize_pic_address (addr_location, 0);
13797 else
13798 addr_location = force_reg (Pmode, addr_location);
13802 /* If it is already an indirect call or the code above moved the
13803 SYMBOL_REF to somewhere else make sure the address can be found in
13804 register 1. */
13805 if (retaddr_reg == NULL_RTX
13806 && GET_CODE (addr_location) != SYMBOL_REF
13807 && !plt_call)
13809 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13810 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13813 if (TARGET_INDIRECT_BRANCH_NOBP_CALL
13814 && GET_CODE (addr_location) != SYMBOL_REF
13815 && !plt_call)
13817 /* Indirect branch thunks require the target to be a single GPR. */
13818 addr_location = force_reg (Pmode, addr_location);
13820 /* Without exrl the indirect branch thunks need an additional
13821 register for larl;ex. */
13822 if (!TARGET_CPU_Z10)
13824 *clobber_thunk_reg = gen_rtx_REG (Pmode, INDIRECT_BRANCH_THUNK_REGNUM);
13825 *clobber_thunk_reg = gen_rtx_CLOBBER (VOIDmode, *clobber_thunk_reg);
13829 addr_location = gen_rtx_MEM (QImode, addr_location);
13830 *call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13832 if (result_reg != NULL_RTX)
13833 *call = gen_rtx_SET (result_reg, *call);
13835 if (retaddr_reg != NULL_RTX)
13837 *clobber_ret_reg = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13839 if (tls_call != NULL_RTX)
13840 *use = gen_rtx_USE (VOIDmode, tls_call);
13844 for (i = 0; i < 4; i++)
13845 if (vec[i] != NULL_RTX)
13846 elts++;
13848 if (elts > 1)
13850 rtvec v;
13851 int e = 0;
13853 v = rtvec_alloc (elts);
13854 for (i = 0; i < 4; i++)
13855 if (vec[i] != NULL_RTX)
13857 RTVEC_ELT (v, e) = vec[i];
13858 e++;
13861 *call = gen_rtx_PARALLEL (VOIDmode, v);
13864 insn = emit_call_insn (*call);
13866 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13867 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13869 /* s390_function_ok_for_sibcall should
13870 have denied sibcalls in this case. */
13871 gcc_assert (retaddr_reg != NULL_RTX);
13872 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13874 return insn;
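/* Editorial sketch, not backend code: the pattern used above to assemble
   the call body -- up to four optional pieces (the call, clobbers, a use)
   are collected in a fixed array, the non-NULL entries are counted, and
   only when more than one survives are they wrapped into a PARALLEL.
   Shown here on plain pointers; the helper name is illustrative only.  */

static int
s390_sketch_pack_nonnull (void *slots[4], void *packed[4])
{
  int i, n = 0;

  for (i = 0; i < 4; i++)
    if (slots[i] != NULL)
      packed[n++] = slots[i];

  return n;	/* a PARALLEL is built only when n > 1 */
}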
13877 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13879 static void
13880 s390_conditional_register_usage (void)
13882 int i;
13884 if (flag_pic)
13886 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13887 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13889 if (TARGET_CPU_ZARCH)
13891 fixed_regs[BASE_REGNUM] = 0;
13892 call_used_regs[BASE_REGNUM] = 0;
13893 fixed_regs[RETURN_REGNUM] = 0;
13894 call_used_regs[RETURN_REGNUM] = 0;
13896 if (TARGET_64BIT)
13898 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13899 call_used_regs[i] = call_really_used_regs[i] = 0;
13901 else
13903 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13904 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13907 if (TARGET_SOFT_FLOAT)
13909 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13910 call_used_regs[i] = fixed_regs[i] = 1;
13913 /* Disable v16 - v31 for non-vector target. */
13914 if (!TARGET_VX)
13916 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13917 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13921 /* Corresponding function to eh_return expander. */
13923 static GTY(()) rtx s390_tpf_eh_return_symbol;
13924 void
13925 s390_emit_tpf_eh_return (rtx target)
13927 rtx_insn *insn;
13928 rtx reg, orig_ra;
13930 if (!s390_tpf_eh_return_symbol)
13931 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13933 reg = gen_rtx_REG (Pmode, 2);
13934 orig_ra = gen_rtx_REG (Pmode, 3);
13936 emit_move_insn (reg, target);
13937 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13938 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13939 gen_rtx_REG (Pmode, RETURN_REGNUM));
13940 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13941 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13943 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13946 /* Rework the prologue/epilogue to avoid saving/restoring
13947 registers unnecessarily. */
13949 static void
13950 s390_optimize_prologue (void)
13952 rtx_insn *insn, *new_insn, *next_insn;
13954 /* Do a final recompute of the frame-related data. */
13955 s390_optimize_register_info ();
13957 /* If all special registers are in fact used, there's nothing we
13958 can do, so no point in walking the insn list. */
13960 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13961 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13962 && (TARGET_CPU_ZARCH
13963 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13964 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13965 return;
13967 /* Search for prologue/epilogue insns and replace them. */
13969 for (insn = get_insns (); insn; insn = next_insn)
13971 int first, last, off;
13972 rtx set, base, offset;
13973 rtx pat;
13975 next_insn = NEXT_INSN (insn);
13977 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13978 continue;
13980 pat = PATTERN (insn);
13982 /* Remove ldgr/lgdr instructions used for saving and restoring
13983 GPRs if possible. */
13984 if (TARGET_Z10)
13986 rtx tmp_pat = pat;
13988 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13989 tmp_pat = XVECEXP (pat, 0, 0);
13991 if (GET_CODE (tmp_pat) == SET
13992 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13993 && REG_P (SET_SRC (tmp_pat))
13994 && REG_P (SET_DEST (tmp_pat)))
13996 int src_regno = REGNO (SET_SRC (tmp_pat));
13997 int dest_regno = REGNO (SET_DEST (tmp_pat));
13998 int gpr_regno;
13999 int fpr_regno;
14001 if (!((GENERAL_REGNO_P (src_regno)
14002 && FP_REGNO_P (dest_regno))
14003 || (FP_REGNO_P (src_regno)
14004 && GENERAL_REGNO_P (dest_regno))))
14005 continue;
14007 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
14008 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
14010 /* GPR must be call-saved, FPR must be call-clobbered. */
14011 if (!call_really_used_regs[fpr_regno]
14012 || call_really_used_regs[gpr_regno])
14013 continue;
14015 /* It must not happen that what we once saved in an FPR now
14016 needs a stack slot. */
14017 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
14019 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
14021 remove_insn (insn);
14022 continue;
14027 if (GET_CODE (pat) == PARALLEL
14028 && store_multiple_operation (pat, VOIDmode))
14030 set = XVECEXP (pat, 0, 0);
14031 first = REGNO (SET_SRC (set));
14032 last = first + XVECLEN (pat, 0) - 1;
14033 offset = const0_rtx;
14034 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
14035 off = INTVAL (offset);
14037 if (GET_CODE (base) != REG || off < 0)
14038 continue;
14039 if (cfun_frame_layout.first_save_gpr != -1
14040 && (cfun_frame_layout.first_save_gpr < first
14041 || cfun_frame_layout.last_save_gpr > last))
14042 continue;
14043 if (REGNO (base) != STACK_POINTER_REGNUM
14044 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14045 continue;
14046 if (first > BASE_REGNUM || last < BASE_REGNUM)
14047 continue;
14049 if (cfun_frame_layout.first_save_gpr != -1)
14051 rtx s_pat = save_gprs (base,
14052 off + (cfun_frame_layout.first_save_gpr
14053 - first) * UNITS_PER_LONG,
14054 cfun_frame_layout.first_save_gpr,
14055 cfun_frame_layout.last_save_gpr);
14056 new_insn = emit_insn_before (s_pat, insn);
14057 INSN_ADDRESSES_NEW (new_insn, -1);
14060 remove_insn (insn);
14061 continue;
14064 if (cfun_frame_layout.first_save_gpr == -1
14065 && GET_CODE (pat) == SET
14066 && GENERAL_REG_P (SET_SRC (pat))
14067 && GET_CODE (SET_DEST (pat)) == MEM)
14069 set = pat;
14070 first = REGNO (SET_SRC (set));
14071 offset = const0_rtx;
14072 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
14073 off = INTVAL (offset);
14075 if (GET_CODE (base) != REG || off < 0)
14076 continue;
14077 if (REGNO (base) != STACK_POINTER_REGNUM
14078 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14079 continue;
14081 remove_insn (insn);
14082 continue;
14085 if (GET_CODE (pat) == PARALLEL
14086 && load_multiple_operation (pat, VOIDmode))
14088 set = XVECEXP (pat, 0, 0);
14089 first = REGNO (SET_DEST (set));
14090 last = first + XVECLEN (pat, 0) - 1;
14091 offset = const0_rtx;
14092 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
14093 off = INTVAL (offset);
14095 if (GET_CODE (base) != REG || off < 0)
14096 continue;
14098 if (cfun_frame_layout.first_restore_gpr != -1
14099 && (cfun_frame_layout.first_restore_gpr < first
14100 || cfun_frame_layout.last_restore_gpr > last))
14101 continue;
14102 if (REGNO (base) != STACK_POINTER_REGNUM
14103 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14104 continue;
14105 if (first > BASE_REGNUM || last < BASE_REGNUM)
14106 continue;
14108 if (cfun_frame_layout.first_restore_gpr != -1)
14110 rtx rpat = restore_gprs (base,
14111 off + (cfun_frame_layout.first_restore_gpr
14112 - first) * UNITS_PER_LONG,
14113 cfun_frame_layout.first_restore_gpr,
14114 cfun_frame_layout.last_restore_gpr);
14116 /* Remove REG_CFA_RESTOREs for registers that we no
14117 longer need to save. */
14118 REG_NOTES (rpat) = REG_NOTES (insn);
14119 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
14120 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
14121 && ((int) REGNO (XEXP (*ptr, 0))
14122 < cfun_frame_layout.first_restore_gpr))
14123 *ptr = XEXP (*ptr, 1);
14124 else
14125 ptr = &XEXP (*ptr, 1);
14126 new_insn = emit_insn_before (rpat, insn);
14127 RTX_FRAME_RELATED_P (new_insn) = 1;
14128 INSN_ADDRESSES_NEW (new_insn, -1);
14131 remove_insn (insn);
14132 continue;
14135 if (cfun_frame_layout.first_restore_gpr == -1
14136 && GET_CODE (pat) == SET
14137 && GENERAL_REG_P (SET_DEST (pat))
14138 && GET_CODE (SET_SRC (pat)) == MEM)
14140 set = pat;
14141 first = REGNO (SET_DEST (set));
14142 offset = const0_rtx;
14143 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
14144 off = INTVAL (offset);
14146 if (GET_CODE (base) != REG || off < 0)
14147 continue;
14149 if (REGNO (base) != STACK_POINTER_REGNUM
14150 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14151 continue;
14153 remove_insn (insn);
14154 continue;
14159 /* On z10 and later the dynamic branch prediction must see the
14160 backward jump within a certain window. If not, it falls back to
14161 the static prediction. This function rearranges the loop backward
14162 branch in a way which makes the static prediction always correct.
14163 The function returns true if it added an instruction. */
14164 static bool
14165 s390_fix_long_loop_prediction (rtx_insn *insn)
14167 rtx set = single_set (insn);
14168 rtx code_label, label_ref;
14169 rtx_insn *uncond_jump;
14170 rtx_insn *cur_insn;
14171 rtx tmp;
14172 int distance;
14174 /* This will exclude branch on count and branch on index patterns
14175 since these are correctly statically predicted. */
14176 if (!set
14177 || SET_DEST (set) != pc_rtx
14178 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
14179 return false;
14181 /* Skip conditional returns. */
14182 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
14183 && XEXP (SET_SRC (set), 2) == pc_rtx)
14184 return false;
14186 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
14187 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
14189 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
14191 code_label = XEXP (label_ref, 0);
14193 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
14194 || INSN_ADDRESSES (INSN_UID (insn)) == -1
14195 || (INSN_ADDRESSES (INSN_UID (insn))
14196 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
14197 return false;
14199 for (distance = 0, cur_insn = PREV_INSN (insn);
14200 distance < PREDICT_DISTANCE - 6;
14201 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
14202 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
14203 return false;
14205 rtx_code_label *new_label = gen_label_rtx ();
14206 uncond_jump = emit_jump_insn_after (
14207 gen_rtx_SET (pc_rtx,
14208 gen_rtx_LABEL_REF (VOIDmode, code_label)),
14209 insn);
14210 emit_label_after (new_label, uncond_jump);
14212 tmp = XEXP (SET_SRC (set), 1);
14213 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
14214 XEXP (SET_SRC (set), 2) = tmp;
14215 INSN_CODE (insn) = -1;
14217 XEXP (label_ref, 0) = new_label;
14218 JUMP_LABEL (insn) = new_label;
14219 JUMP_LABEL (uncond_jump) = code_label;
14221 return true;
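/* Editorial sketch (not part of the backend): the rewrite performed by
   s390_fix_long_loop_prediction above, shown with gotos on a plain C
   loop.  A far backward conditional branch is replaced by a short forward
   branch over an unconditional backward jump, so the backward jump is
   always statically predicted as taken.  The function name and loop shape
   are purely illustrative.  */

static long
s390_sketch_rearranged_loop (long n)
{
  long i = 0, sum = 0;

 loop_top:
  sum += i++;
  /* Before the rewrite: if (i < n) goto loop_top;  (far backward branch)
     After the rewrite the control flow has this shape:  */
  if (i >= n)		/* inverted condition, short forward branch */
    goto loop_exit;
  goto loop_top;	/* unconditional backward jump */
 loop_exit:
  return sum;
}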
14224 /* Returns 1 if INSN reads the value of REG for purposes not related
14225 to addressing of memory, and 0 otherwise. */
14226 static int
14227 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
14229 return reg_referenced_p (reg, PATTERN (insn))
14230 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
14233 /* Starting from INSN find_cond_jump looks downwards in the insn
14234 stream for a single jump insn which is the last user of the
14235 condition code set in INSN. */
14236 static rtx_insn *
14237 find_cond_jump (rtx_insn *insn)
14239 for (; insn; insn = NEXT_INSN (insn))
14241 rtx ite, cc;
14243 if (LABEL_P (insn))
14244 break;
14246 if (!JUMP_P (insn))
14248 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
14249 break;
14250 continue;
14253 /* This will be triggered by a return. */
14254 if (GET_CODE (PATTERN (insn)) != SET)
14255 break;
14257 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
14258 ite = SET_SRC (PATTERN (insn));
14260 if (GET_CODE (ite) != IF_THEN_ELSE)
14261 break;
14263 cc = XEXP (XEXP (ite, 0), 0);
14264 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
14265 break;
14267 if (find_reg_note (insn, REG_DEAD, cc))
14268 return insn;
14269 break;
14272 return NULL;
14275 /* Swap the condition in COND and the operands in OP0 and OP1 so that
14276 the semantics does not change. If NULL_RTX is passed as COND the
14277 function tries to find the conditional jump starting with INSN. */
14278 static void
14279 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
14281 rtx tmp = *op0;
14283 if (cond == NULL_RTX)
14285 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
14286 rtx set = jump ? single_set (jump) : NULL_RTX;
14288 if (set == NULL_RTX)
14289 return;
14291 cond = XEXP (SET_SRC (set), 0);
14294 *op0 = *op1;
14295 *op1 = tmp;
14296 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
14299 /* On z10, instructions of the compare-and-branch family have the
14300 property to access the register occurring as second operand with
14301 its bits complemented. If such a compare is grouped with a second
14302 instruction that accesses the same register non-complemented, and
14303 if that register's value is delivered via a bypass, then the
14304 pipeline recycles, thereby causing significant performance decline.
14305 This function locates such situations and exchanges the two
14306 operands of the compare. The function returns true whenever it
14307 added an insn. */
14308 static bool
14309 s390_z10_optimize_cmp (rtx_insn *insn)
14311 rtx_insn *prev_insn, *next_insn;
14312 bool insn_added_p = false;
14313 rtx cond, *op0, *op1;
14315 if (GET_CODE (PATTERN (insn)) == PARALLEL)
14317 /* Handle compare and branch and branch on count
14318 instructions. */
14319 rtx pattern = single_set (insn);
14321 if (!pattern
14322 || SET_DEST (pattern) != pc_rtx
14323 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
14324 return false;
14326 cond = XEXP (SET_SRC (pattern), 0);
14327 op0 = &XEXP (cond, 0);
14328 op1 = &XEXP (cond, 1);
14330 else if (GET_CODE (PATTERN (insn)) == SET)
14332 rtx src, dest;
14334 /* Handle normal compare instructions. */
14335 src = SET_SRC (PATTERN (insn));
14336 dest = SET_DEST (PATTERN (insn));
14338 if (!REG_P (dest)
14339 || !CC_REGNO_P (REGNO (dest))
14340 || GET_CODE (src) != COMPARE)
14341 return false;
14343 /* s390_swap_cmp will try to find the conditional
14344 jump when passing NULL_RTX as condition. */
14345 cond = NULL_RTX;
14346 op0 = &XEXP (src, 0);
14347 op1 = &XEXP (src, 1);
14349 else
14350 return false;
14352 if (!REG_P (*op0) || !REG_P (*op1))
14353 return false;
14355 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
14356 return false;
14358 /* Swap the COMPARE arguments and its mask if there is a
14359 conflicting access in the previous insn. */
14360 prev_insn = prev_active_insn (insn);
14361 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
14362 && reg_referenced_p (*op1, PATTERN (prev_insn)))
14363 s390_swap_cmp (cond, op0, op1, insn);
14365 /* Check if there is a conflict with the next insn. If there
14366 was no conflict with the previous insn, then swap the
14367 COMPARE arguments and its mask. If we already swapped
14368 the operands, or if swapping them would cause a conflict
14369 with the previous insn, issue a NOP after the COMPARE in
14370 order to separate the two instructions. */
14371 next_insn = next_active_insn (insn);
14372 if (next_insn != NULL_RTX && INSN_P (next_insn)
14373 && s390_non_addr_reg_read_p (*op1, next_insn))
14375 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
14376 && s390_non_addr_reg_read_p (*op0, prev_insn))
14378 if (REGNO (*op1) == 0)
14379 emit_insn_after (gen_nop_lr1 (), insn);
14380 else
14381 emit_insn_after (gen_nop_lr0 (), insn);
14382 insn_added_p = true;
14384 else
14385 s390_swap_cmp (cond, op0, op1, insn);
14387 return insn_added_p;
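/* Editorial note, not backend code: a minimal standalone reminder of why
   s390_swap_cmp has to swap the condition code test together with the
   operands -- a < b is equivalent to b > a, not to b < a.  The helper
   name is purely illustrative.  */

static int
s390_sketch_swapped_compare_ok (int a, int b)
{
  return (a < b) == (b > a);	/* always 1 */
}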
14390 /* Number of INSNs to be scanned backward in the last BB of the loop
14391 and forward in the first BB of the loop. This usually should be a
14392 bit more than the number of INSNs which could go into one
14393 group. */
14394 #define S390_OSC_SCAN_INSN_NUM 5
14396 /* Scan LOOP for static OSC collisions and return true if an osc_break
14397 should be issued for this loop. */
14398 static bool
14399 s390_adjust_loop_scan_osc (struct loop* loop)
14402 HARD_REG_SET modregs, newregs;
14403 rtx_insn *insn, *store_insn = NULL;
14404 rtx set;
14405 struct s390_address addr_store, addr_load;
14406 subrtx_iterator::array_type array;
14407 int insn_count;
14409 CLEAR_HARD_REG_SET (modregs);
14411 insn_count = 0;
14412 FOR_BB_INSNS_REVERSE (loop->latch, insn)
14414 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14415 continue;
14417 insn_count++;
14418 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14419 return false;
14421 find_all_hard_reg_sets (insn, &newregs, true);
14422 IOR_HARD_REG_SET (modregs, newregs);
14424 set = single_set (insn);
14425 if (!set)
14426 continue;
14428 if (MEM_P (SET_DEST (set))
14429 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
14431 store_insn = insn;
14432 break;
14436 if (store_insn == NULL_RTX)
14437 return false;
14439 insn_count = 0;
14440 FOR_BB_INSNS (loop->header, insn)
14442 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14443 continue;
14445 if (insn == store_insn)
14446 return false;
14448 insn_count++;
14449 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14450 return false;
14452 find_all_hard_reg_sets (insn, &newregs, true);
14453 IOR_HARD_REG_SET (modregs, newregs);
14455 set = single_set (insn);
14456 if (!set)
14457 continue;
14459 /* An intermediate store disrupts static OSC checking
14460 anyway. */
14461 if (MEM_P (SET_DEST (set))
14462 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
14463 return false;
14465 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
14466 if (MEM_P (*iter)
14467 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
14468 && rtx_equal_p (addr_load.base, addr_store.base)
14469 && rtx_equal_p (addr_load.indx, addr_store.indx)
14470 && rtx_equal_p (addr_load.disp, addr_store.disp))
14472 if ((addr_load.base != NULL_RTX
14473 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
14474 || (addr_load.indx != NULL_RTX
14475 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
14476 return true;
14479 return false;
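/* Editorial sketch, not backend code: the kind of source-level pattern
   the scan above is looking for.  The store near the end of one iteration
   and the load at the start of the next use the same base/index/disp
   expression while the index register changes in between, which is what
   can trip the hardware's operand-store-compare (OSC) logic.  Names and
   loop body are illustrative only.  */

static int
s390_sketch_osc_candidate (int *a, int n)
{
  int i, sum = 0;

  for (i = 1; i < n; i++)
    {
      sum += a[i];	/* load near the loop header via base + index  */
      a[i] = sum;	/* store in the latch via the same expression  */
    }
  return sum;
}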
14482 /* Look for adjustments which can be done on simple innermost
14483 loops. */
14484 static void
14485 s390_adjust_loops ()
14487 struct loop *loop = NULL;
14489 df_analyze ();
14490 compute_bb_for_insn ();
14492 /* Find the loops. */
14493 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
14495 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
14497 if (dump_file)
14499 flow_loop_dump (loop, dump_file, NULL, 0);
14500 fprintf (dump_file, ";; OSC loop scan Loop: ");
14502 if (loop->latch == NULL
14503 || pc_set (BB_END (loop->latch)) == NULL_RTX
14504 || !s390_adjust_loop_scan_osc (loop))
14506 if (dump_file)
14508 if (loop->latch == NULL)
14509 fprintf (dump_file, " multiple backward jumps\n");
14510 else
14512 fprintf (dump_file, " header insn: %d latch insn: %d ",
14513 INSN_UID (BB_HEAD (loop->header)),
14514 INSN_UID (BB_END (loop->latch)));
14515 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
14516 fprintf (dump_file, " loop does not end with jump\n");
14517 else
14518 fprintf (dump_file, " not instrumented\n");
14522 else
14524 rtx_insn *new_insn;
14526 if (dump_file)
14527 fprintf (dump_file, " adding OSC break insn: ");
14528 new_insn = emit_insn_before (gen_osc_break (),
14529 BB_END (loop->latch));
14530 INSN_ADDRESSES_NEW (new_insn, -1);
14534 loop_optimizer_finalize ();
14536 df_finish_pass (false);
14539 /* Perform machine-dependent processing. */
14541 static void
14542 s390_reorg (void)
14544 bool pool_overflow = false;
14545 int hw_before, hw_after;
14547 if (s390_tune == PROCESSOR_2964_Z13)
14548 s390_adjust_loops ();
14550 /* Make sure all splits have been performed; splits after
14551 machine_dependent_reorg might confuse insn length counts. */
14552 split_all_insns_noflow ();
14554 /* Install the main literal pool and the associated base
14555 register load insns.
14557 In addition, there are two problematic situations we need
14558 to correct:
14560 - the literal pool might be > 4096 bytes in size, so that
14561 some of its elements cannot be directly accessed
14563 - a branch target might be > 64K away from the branch, so that
14564 it is not possible to use a PC-relative instruction.
14566 To fix those, we split the single literal pool into multiple
14567 pool chunks, reloading the pool base register at various
14568 points throughout the function to ensure it always points to
14569 the pool chunk the following code expects, and / or replace
14570 PC-relative branches by absolute branches.
14572 However, the two problems are interdependent: splitting the
14573 literal pool can move a branch further away from its target,
14574 causing the 64K limit to overflow, and on the other hand,
14575 replacing a PC-relative branch by an absolute branch means
14576 we need to put the branch target address into the literal
14577 pool, possibly causing it to overflow.
14579 So, we loop trying to fix up both problems until we manage
14580 to satisfy both conditions at the same time. Note that the
14581 loop is guaranteed to terminate as every pass of the loop
14582 strictly decreases the total number of PC-relative branches
14583 in the function. (This is not completely true as there
14584 might be branch-over-pool insns introduced by chunkify_start.
14585 Those never need to be split however.) */
14587 for (;;)
14589 struct constant_pool *pool = NULL;
14591 /* Collect the literal pool. */
14592 if (!pool_overflow)
14594 pool = s390_mainpool_start ();
14595 if (!pool)
14596 pool_overflow = true;
14599 /* If literal pool overflowed, start to chunkify it. */
14600 if (pool_overflow)
14601 pool = s390_chunkify_start ();
14603 /* Split out-of-range branches. If this has created new
14604 literal pool entries, cancel current chunk list and
14605 recompute it. zSeries machines have large branch
14606 instructions, so we never need to split a branch. */
14607 if (!TARGET_CPU_ZARCH && s390_split_branches ())
14609 if (pool_overflow)
14610 s390_chunkify_cancel (pool);
14611 else
14612 s390_mainpool_cancel (pool);
14614 continue;
14617 /* If we made it up to here, both conditions are satisfied.
14618 Finish up literal pool related changes. */
14619 if (pool_overflow)
14620 s390_chunkify_finish (pool);
14621 else
14622 s390_mainpool_finish (pool);
14624 /* We're done splitting branches. */
14625 cfun->machine->split_branches_pending_p = false;
14626 break;
14629 /* Generate out-of-pool execute target insns. */
14630 if (TARGET_CPU_ZARCH)
14632 rtx_insn *insn, *target;
14633 rtx label;
14635 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14637 label = s390_execute_label (insn);
14638 if (!label)
14639 continue;
14641 gcc_assert (label != const0_rtx);
14643 target = emit_label (XEXP (label, 0));
14644 INSN_ADDRESSES_NEW (target, -1);
14646 if (JUMP_P (insn))
14648 target = emit_jump_insn (s390_execute_target (insn));
14649 /* This is important in order to keep a table jump
14650 pointing at the jump table label. Only then is it
14651 recognized as a table jump. */
14652 JUMP_LABEL (target) = JUMP_LABEL (insn);
14654 else
14655 target = emit_insn (s390_execute_target (insn));
14656 INSN_ADDRESSES_NEW (target, -1);
14660 /* Try to optimize prologue and epilogue further. */
14661 s390_optimize_prologue ();
14663 /* Walk over the insns and do some >=z10 specific changes. */
14664 if (s390_tune >= PROCESSOR_2097_Z10)
14666 rtx_insn *insn;
14667 bool insn_added_p = false;
14669 /* The insn lengths and addresses have to be up to date for the
14670 following manipulations. */
14671 shorten_branches (get_insns ());
14673 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14675 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14676 continue;
14678 if (JUMP_P (insn))
14679 insn_added_p |= s390_fix_long_loop_prediction (insn);
14681 if ((GET_CODE (PATTERN (insn)) == PARALLEL
14682 || GET_CODE (PATTERN (insn)) == SET)
14683 && s390_tune == PROCESSOR_2097_Z10)
14684 insn_added_p |= s390_z10_optimize_cmp (insn);
14687 /* Adjust branches if we added new instructions. */
14688 if (insn_added_p)
14689 shorten_branches (get_insns ());
14692 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
14693 if (hw_after > 0)
14695 rtx_insn *insn;
14697 /* Insert NOPs for hotpatching. */
14698 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14699 /* Emit NOPs
14700 1. inside the area covered by debug information to allow setting
14701 breakpoints at the NOPs,
14702 2. before any insn which results in an asm instruction,
14703 3. before in-function labels to avoid jumping to the NOPs, for
14704 example as part of a loop,
14705 4. before any barrier in case the function is completely empty
14706 (__builtin_unreachable ()) and has neither internal labels nor
14707 active insns.
14709 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
14710 break;
14711 /* Output a series of NOPs before the first active insn. */
14712 while (insn && hw_after > 0)
14714 if (hw_after >= 3 && TARGET_CPU_ZARCH)
14716 emit_insn_before (gen_nop_6_byte (), insn);
14717 hw_after -= 3;
14719 else if (hw_after >= 2)
14721 emit_insn_before (gen_nop_4_byte (), insn);
14722 hw_after -= 2;
14724 else
14726 emit_insn_before (gen_nop_2_byte (), insn);
14727 hw_after -= 1;
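/* Editorial sketch, not backend code: the greedy NOP selection used above
   for hotpatching, written as a standalone function.  HW_AFTER is the
   number of halfwords to fill, USE_6_BYTE mirrors the TARGET_CPU_ZARCH
   check, and SIZES receives the byte size of each NOP that would be
   emitted.  Purely illustrative.  */

static int
s390_sketch_hotpatch_nops (int hw_after, int use_6_byte, int sizes[])
{
  int n = 0;

  while (hw_after > 0)
    {
      if (use_6_byte && hw_after >= 3)
	{ sizes[n++] = 6; hw_after -= 3; }
      else if (hw_after >= 2)
	{ sizes[n++] = 4; hw_after -= 2; }
      else
	{ sizes[n++] = 2; hw_after -= 1; }
    }
  return n;
}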
14733 /* Return true if INSN is a fp load insn writing register REGNO. */
14734 static inline bool
14735 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
14737 rtx set;
14738 enum attr_type flag = s390_safe_attr_type (insn);
14740 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
14741 return false;
14743 set = single_set (insn);
14745 if (set == NULL_RTX)
14746 return false;
14748 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
14749 return false;
14751 if (REGNO (SET_DEST (set)) != regno)
14752 return false;
14754 return true;
14757 /* This value describes the distance to be avoided between an
14758 arithmetic fp instruction and an fp load writing the same register.
14759 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
14760 fine, but the exact value has to be avoided. Otherwise the FP
14761 pipeline will throw an exception causing a major penalty. */
14762 #define Z10_EARLYLOAD_DISTANCE 7
14764 /* Rearrange the ready list in order to avoid the situation described
14765 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14766 moved to the very end of the ready list. */
14767 static void
14768 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14770 unsigned int regno;
14771 int nready = *nready_p;
14772 rtx_insn *tmp;
14773 int i;
14774 rtx_insn *insn;
14775 rtx set;
14776 enum attr_type flag;
14777 int distance;
14779 /* Skip DISTANCE - 1 active insns. */
14780 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14781 distance > 0 && insn != NULL_RTX;
14782 distance--, insn = prev_active_insn (insn))
14783 if (CALL_P (insn) || JUMP_P (insn))
14784 return;
14786 if (insn == NULL_RTX)
14787 return;
14789 set = single_set (insn);
14791 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14792 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14793 return;
14795 flag = s390_safe_attr_type (insn);
14797 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14798 return;
14800 regno = REGNO (SET_DEST (set));
14801 i = nready - 1;
14803 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14804 i--;
14806 if (!i)
14807 return;
14809 tmp = ready[i];
14810 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14811 ready[0] = tmp;
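/* Editorial sketch, not backend code: the ready-list rotation performed
   above, on a plain int array.  The entry at index I moves to slot 0 --
   the very end of the scheduler's ready list, i.e. the position issued
   last -- and the entries below it shift up by one, matching the memmove
   above.  Names are illustrative only.  */

static void
s390_sketch_move_to_list_end (int *ready, int i)
{
  int tmp = ready[i];

  for (; i > 0; i--)
    ready[i] = ready[i - 1];
  ready[0] = tmp;
}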
14814 /* Returns TRUE if BB is entered via a fallthru edge and all other
14815 incoming edges are less than unlikely. */
14816 static bool
14817 s390_bb_fallthru_entry_likely (basic_block bb)
14819 edge e, fallthru_edge;
14820 edge_iterator ei;
14822 if (!bb)
14823 return false;
14825 fallthru_edge = find_fallthru_edge (bb->preds);
14826 if (!fallthru_edge)
14827 return false;
14829 FOR_EACH_EDGE (e, ei, bb->preds)
14830 if (e != fallthru_edge
14831 && e->probability >= profile_probability::unlikely ())
14832 return false;
14834 return true;
14837 /* The s390_sched_state variable tracks the state of the current or
14838 the last instruction group.
14840 0,1,2 number of instructions scheduled in the current group
14841 3 the last group is complete - normal insns
14842 4 the last group was a cracked/expanded insn */
14844 static int s390_sched_state = 0;
14846 #define S390_SCHED_STATE_NORMAL 3
14847 #define S390_SCHED_STATE_CRACKED 4
14849 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14850 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14851 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14852 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
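/* Editorial sketch, not backend code: the group-tracking automaton that
   the comment above describes, as a pure function from the current
   s390_sched_state and the attribute mask of the insn just issued to the
   next state.  It mirrors the update done in s390_sched_variable_issue
   further below; the helper name is illustrative only.  */

static int
s390_sketch_next_sched_state (int state, unsigned int mask)
{
  if (mask & (S390_SCHED_ATTR_MASK_CRACKED | S390_SCHED_ATTR_MASK_EXPANDED))
    return S390_SCHED_STATE_CRACKED;
  if (mask & (S390_SCHED_ATTR_MASK_ENDGROUP | S390_SCHED_ATTR_MASK_GROUPALONE))
    return S390_SCHED_STATE_NORMAL;

  /* Only normal insns remain.  */
  switch (state)
    {
    case 0:
    case 1:
    case 2:
      return state + 1;		/* fill the next slot of the group */
    case S390_SCHED_STATE_NORMAL:
      return 1;			/* last group complete; start a new one */
    case S390_SCHED_STATE_CRACKED:
    default:
      return S390_SCHED_STATE_NORMAL;
    }
}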
14854 static unsigned int
14855 s390_get_sched_attrmask (rtx_insn *insn)
14857 unsigned int mask = 0;
14859 switch (s390_tune)
14861 case PROCESSOR_2827_ZEC12:
14862 if (get_attr_zEC12_cracked (insn))
14863 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14864 if (get_attr_zEC12_expanded (insn))
14865 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14866 if (get_attr_zEC12_endgroup (insn))
14867 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14868 if (get_attr_zEC12_groupalone (insn))
14869 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14870 break;
14871 case PROCESSOR_2964_Z13:
14872 case PROCESSOR_3906_Z14:
14873 if (get_attr_z13_cracked (insn))
14874 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14875 if (get_attr_z13_expanded (insn))
14876 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14877 if (get_attr_z13_endgroup (insn))
14878 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14879 if (get_attr_z13_groupalone (insn))
14880 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14881 break;
14882 default:
14883 gcc_unreachable ();
14885 return mask;
14888 static unsigned int
14889 s390_get_unit_mask (rtx_insn *insn, int *units)
14891 unsigned int mask = 0;
14893 switch (s390_tune)
14895 case PROCESSOR_2964_Z13:
14896 case PROCESSOR_3906_Z14:
14897 *units = 3;
14898 if (get_attr_z13_unit_lsu (insn))
14899 mask |= 1 << 0;
14900 if (get_attr_z13_unit_fxu (insn))
14901 mask |= 1 << 1;
14902 if (get_attr_z13_unit_vfu (insn))
14903 mask |= 1 << 2;
14904 break;
14905 default:
14906 gcc_unreachable ();
14908 return mask;
14911 /* Return the scheduling score for INSN. The higher the score the
14912 better. The score is calculated from the OOO scheduling attributes
14913 of INSN and the scheduling state s390_sched_state. */
14914 static int
14915 s390_sched_score (rtx_insn *insn)
14917 unsigned int mask = s390_get_sched_attrmask (insn);
14918 int score = 0;
14920 switch (s390_sched_state)
14922 case 0:
14923 /* Try to put insns into the first slot which would otherwise
14924 break a group. */
14925 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14926 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14927 score += 5;
14928 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14929 score += 10;
14930 /* fallthrough */
14931 case 1:
14932 /* Prefer not cracked insns while trying to put together a
14933 group. */
14934 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14935 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14936 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14937 score += 10;
14938 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14939 score += 5;
14940 break;
14941 case 2:
14942 /* Prefer not cracked insns while trying to put together a
14943 group. */
14944 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14945 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14946 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14947 score += 10;
14948 /* Prefer endgroup insns in the last slot. */
14949 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14950 score += 10;
14951 break;
14952 case S390_SCHED_STATE_NORMAL:
14953 /* Prefer not cracked insns if the last was not cracked. */
14954 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14955 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14956 score += 5;
14957 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14958 score += 10;
14959 break;
14960 case S390_SCHED_STATE_CRACKED:
14961 /* Try to keep cracked insns together to prevent them from
14962 interrupting groups. */
14963 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14964 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14965 score += 5;
14966 break;
14969 if (s390_tune >= PROCESSOR_2964_Z13)
14971 int units, i;
14972 unsigned unit_mask, m = 1;
14974 unit_mask = s390_get_unit_mask (insn, &units);
14975 gcc_assert (units <= MAX_SCHED_UNITS);
14977 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14978 ago the last insn of this unit type got scheduled. This is
14979 supposed to help provide a proper instruction mix to the
14980 CPU. */
14981 for (i = 0; i < units; i++, m <<= 1)
14982 if (m & unit_mask)
14983 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14984 MAX_SCHED_MIX_DISTANCE);
14986 unsigned latency = insn_default_latency (insn);
14988 int other_side = 1 - current_side;
14990 /* Try to delay long-running insns when side is busy. */
14991 if (latency > LONGRUNNING_THRESHOLD)
14993 if (get_attr_z13_unit_fxu (insn) && fxu_longrunning[current_side]
14994 && fxu_longrunning[other_side] <= fxu_longrunning[current_side])
14995 score = MAX (0, score - 10);
14997 if (get_attr_z13_unit_vfu (insn) && vfu_longrunning[current_side]
14998 && vfu_longrunning[other_side] <= vfu_longrunning[current_side])
14999 score = MAX (0, score - 10);
15003 return score;
15006 /* This function is called via hook TARGET_SCHED_REORDER before
15007 issuing one insn from list READY which contains *NREADYP entries.
15008 For target z10 it reorders load instructions to avoid early load
15009 conflicts in the floating point pipeline. */
15010 static int
15011 s390_sched_reorder (FILE *file, int verbose,
15012 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
15014 if (s390_tune == PROCESSOR_2097_Z10
15015 && reload_completed
15016 && *nreadyp > 1)
15017 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
15019 if (s390_tune >= PROCESSOR_2827_ZEC12
15020 && reload_completed
15021 && *nreadyp > 1)
15023 int i;
15024 int last_index = *nreadyp - 1;
15025 int max_index = -1;
15026 int max_score = -1;
15027 rtx_insn *tmp;
15029 /* Just move the insn with the highest score to the top (the
15030 end) of the list. A full sort is not needed since a conflict
15031 in the hazard recognition cannot happen. So the top insn in
15032 the ready list will always be taken. */
15033 for (i = last_index; i >= 0; i--)
15035 int score;
15037 if (recog_memoized (ready[i]) < 0)
15038 continue;
15040 score = s390_sched_score (ready[i]);
15041 if (score > max_score)
15043 max_score = score;
15044 max_index = i;
15048 if (max_index != -1)
15050 if (max_index != last_index)
15052 tmp = ready[max_index];
15053 ready[max_index] = ready[last_index];
15054 ready[last_index] = tmp;
15056 if (verbose > 5)
15057 fprintf (file,
15058 ";;\t\tBACKEND: move insn %d to the top of list\n",
15059 INSN_UID (ready[last_index]));
15061 else if (verbose > 5)
15062 fprintf (file,
15063 ";;\t\tBACKEND: best insn %d already on top\n",
15064 INSN_UID (ready[last_index]));
15067 if (verbose > 5)
15069 fprintf (file, "ready list ooo attributes - sched state: %d\n",
15070 s390_sched_state);
15072 for (i = last_index; i >= 0; i--)
15074 unsigned int sched_mask;
15075 rtx_insn *insn = ready[i];
15077 if (recog_memoized (insn) < 0)
15078 continue;
15080 sched_mask = s390_get_sched_attrmask (insn);
15081 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
15082 INSN_UID (insn),
15083 s390_sched_score (insn));
15084 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
15085 ((M) & sched_mask) ? #ATTR : "");
15086 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
15087 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
15088 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
15089 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
15090 #undef PRINT_SCHED_ATTR
15091 if (s390_tune >= PROCESSOR_2964_Z13)
15093 unsigned int unit_mask, m = 1;
15094 int units, j;
15096 unit_mask = s390_get_unit_mask (insn, &units);
15097 fprintf (file, "(units:");
15098 for (j = 0; j < units; j++, m <<= 1)
15099 if (m & unit_mask)
15100 fprintf (file, " u%d", j);
15101 fprintf (file, ")");
15103 fprintf (file, "\n");
15108 return s390_issue_rate ();
15112 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
15113 the scheduler has issued INSN. It stores the last issued insn into
15114 last_scheduled_insn in order to make it available for
15115 s390_sched_reorder. */
15116 static int
15117 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
15119 last_scheduled_insn = insn;
15121 bool starts_group = false;
15123 if (s390_tune >= PROCESSOR_2827_ZEC12
15124 && reload_completed
15125 && recog_memoized (insn) >= 0)
15127 unsigned int mask = s390_get_sched_attrmask (insn);
15129 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
15130 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0
15131 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
15132 starts_group = true;
15134 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
15135 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
15136 s390_sched_state = S390_SCHED_STATE_CRACKED;
15137 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
15138 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
15139 s390_sched_state = S390_SCHED_STATE_NORMAL;
15140 else
15142 /* Only normal insns are left (mask == 0). */
15143 switch (s390_sched_state)
15145 case 0:
15146 starts_group = true;
15147 /* fallthrough */
15148 case 1:
15149 case 2:
15150 s390_sched_state++;
15151 break;
15152 case S390_SCHED_STATE_NORMAL:
15153 starts_group = true;
15154 s390_sched_state = 1;
15155 break;
15156 case S390_SCHED_STATE_CRACKED:
15157 s390_sched_state = S390_SCHED_STATE_NORMAL;
15158 break;
15162 if (s390_tune >= PROCESSOR_2964_Z13)
15164 int units, i;
15165 unsigned unit_mask, m = 1;
15167 unit_mask = s390_get_unit_mask (insn, &units);
15168 gcc_assert (units <= MAX_SCHED_UNITS);
15170 for (i = 0; i < units; i++, m <<= 1)
15171 if (m & unit_mask)
15172 last_scheduled_unit_distance[i] = 0;
15173 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
15174 last_scheduled_unit_distance[i]++;
15177 /* If this insn started a new group, the side flipped. */
15178 if (starts_group)
15179 current_side = current_side ? 0 : 1;
15181 for (int i = 0; i < 2; i++)
15183 if (fxu_longrunning[i] >= 1)
15184 fxu_longrunning[i] -= 1;
15185 if (vfu_longrunning[i] >= 1)
15186 vfu_longrunning[i] -= 1;
15189 unsigned latency = insn_default_latency (insn);
15190 if (latency > LONGRUNNING_THRESHOLD)
15192 if (get_attr_z13_unit_fxu (insn))
15193 fxu_longrunning[current_side] = latency * LATENCY_FACTOR;
15194 else
15195 vfu_longrunning[current_side] = latency * LATENCY_FACTOR;
15198 if (verbose > 5)
15200 unsigned int sched_mask;
15202 sched_mask = s390_get_sched_attrmask (insn);
15204 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
15205 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
15206 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
15207 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
15208 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
15209 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
15210 #undef PRINT_SCHED_ATTR
15212 if (s390_tune >= PROCESSOR_2964_Z13)
15214 unsigned int unit_mask, m = 1;
15215 int units, j;
15217 unit_mask = s390_get_unit_mask (insn, &units);
15218 fprintf (file, "(units:");
15219 for (j = 0; j < units; j++, m <<= 1)
15220 if (m & unit_mask)
15221 fprintf (file, " %d", j);
15222 fprintf (file, ")");
15224 fprintf (file, " sched state: %d\n", s390_sched_state);
15226 if (s390_tune >= PROCESSOR_2964_Z13)
15228 int units, j;
15230 s390_get_unit_mask (insn, &units);
15232 fprintf (file, ";;\t\tBACKEND: units unused for: ");
15233 for (j = 0; j < units; j++)
15234 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
15235 fprintf (file, "\n");
15240 if (GET_CODE (PATTERN (insn)) != USE
15241 && GET_CODE (PATTERN (insn)) != CLOBBER)
15242 return more - 1;
15243 else
15244 return more;
15247 static void
15248 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
15249 int verbose ATTRIBUTE_UNUSED,
15250 int max_ready ATTRIBUTE_UNUSED)
15252 last_scheduled_insn = NULL;
15253 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
15255 /* If the next basic block is most likely entered via a fallthru edge
15256 we keep the last sched state. Otherwise we start a new group.
15257 The scheduler traverses basic blocks in "instruction stream" ordering
15258 so if we see a fallthru edge here, s390_sched_state will be that of its
15259 source block.
15261 current_sched_info->prev_head is the insn before the first insn of the
15262 block of insns to be scheduled.
15264 rtx_insn *insn = current_sched_info->prev_head
15265 ? NEXT_INSN (current_sched_info->prev_head) : NULL;
15266 basic_block bb = insn ? BLOCK_FOR_INSN (insn) : NULL;
15267 if (s390_tune < PROCESSOR_2964_Z13 || !s390_bb_fallthru_entry_likely (bb))
15268 s390_sched_state = 0;
15271 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
15272 a new number of times struct loop *loop should be unrolled if tuned for cpus with
15273 a built-in stride prefetcher.
15274 The loop is analyzed for memory accesses by calling check_dpu for
15275 each rtx of the loop. Depending on the loop_depth and the number of
15276 memory accesses, a new number <= nunroll is returned to improve the
15277 behavior of the hardware prefetch unit. */
15278 static unsigned
15279 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
15281 basic_block *bbs;
15282 rtx_insn *insn;
15283 unsigned i;
15284 unsigned mem_count = 0;
15286 if (s390_tune < PROCESSOR_2097_Z10)
15287 return nunroll;
15289 /* Count the number of memory references within the loop body. */
15290 bbs = get_loop_body (loop);
15291 subrtx_iterator::array_type array;
15292 for (i = 0; i < loop->num_nodes; i++)
15293 FOR_BB_INSNS (bbs[i], insn)
15294 if (INSN_P (insn) && INSN_CODE (insn) != -1)
15295 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
15296 if (MEM_P (*iter))
15297 mem_count += 1;
15298 free (bbs);
15300 /* Prevent division by zero; nunroll does not need to be adjusted in this case. */
15301 if (mem_count == 0)
15302 return nunroll;
15304 switch (loop_depth(loop))
15306 case 1:
15307 return MIN (nunroll, 28 / mem_count);
15308 case 2:
15309 return MIN (nunroll, 22 / mem_count);
15310 default:
15311 return MIN (nunroll, 16 / mem_count);
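/* Editorial sketch, not backend code: the clamping applied above (for z10
   or newer tuning), as a standalone function of the requested unroll
   factor, the loop depth, and the number of memory references counted in
   the loop body.  Purely illustrative.  */

static unsigned
s390_sketch_unroll_limit (unsigned nunroll, unsigned depth, unsigned mem_count)
{
  unsigned budget;

  if (mem_count == 0)
    return nunroll;

  budget = depth == 1 ? 28 : depth == 2 ? 22 : 16;
  return nunroll < budget / mem_count ? nunroll : budget / mem_count;
}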
15315 /* Restore the current options. This is a hook function and also called
15316 internally. */
15318 static void
15319 s390_function_specific_restore (struct gcc_options *opts,
15320 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
15322 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
15325 static void
15326 s390_option_override_internal (bool main_args_p,
15327 struct gcc_options *opts,
15328 const struct gcc_options *opts_set)
15330 const char *prefix;
15331 const char *suffix;
15333 /* Set up prefix/suffix so the error messages refer to either the command
15334 line argument, or the attribute(target). */
15335 if (main_args_p)
15337 prefix = "-m";
15338 suffix = "";
15340 else
15342 prefix = "option(\"";
15343 suffix = "\")";
15347 /* Architecture mode defaults according to ABI. */
15348 if (!(opts_set->x_target_flags & MASK_ZARCH))
15350 if (TARGET_64BIT)
15351 opts->x_target_flags |= MASK_ZARCH;
15352 else
15353 opts->x_target_flags &= ~MASK_ZARCH;
15356 /* Set the march default in case it hasn't been specified on cmdline. */
15357 if (!opts_set->x_s390_arch)
15358 opts->x_s390_arch = PROCESSOR_2064_Z900;
15359 else if (opts->x_s390_arch == PROCESSOR_9672_G5
15360 || opts->x_s390_arch == PROCESSOR_9672_G6)
15361 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
15362 "in future releases; use at least %sarch=z900%s",
15363 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
15364 suffix, prefix, suffix);
15366 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
15368 /* Determine processor to tune for. */
15369 if (!opts_set->x_s390_tune)
15370 opts->x_s390_tune = opts->x_s390_arch;
15371 else if (opts->x_s390_tune == PROCESSOR_9672_G5
15372 || opts->x_s390_tune == PROCESSOR_9672_G6)
15373 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
15374 "in future releases; use at least %stune=z900%s",
15375 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
15376 suffix, prefix, suffix);
15378 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
15380 /* Sanity checks. */
15381 if (opts->x_s390_arch == PROCESSOR_NATIVE
15382 || opts->x_s390_tune == PROCESSOR_NATIVE)
15383 gcc_unreachable ();
15384 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
15385 error ("z/Architecture mode not supported on %s",
15386 processor_table[(int)opts->x_s390_arch].name);
15387 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
15388 error ("64-bit ABI not supported in ESA/390 mode");
15390 if (opts->x_s390_indirect_branch == indirect_branch_thunk_inline
15391 || opts->x_s390_indirect_branch_call == indirect_branch_thunk_inline
15392 || opts->x_s390_function_return == indirect_branch_thunk_inline
15393 || opts->x_s390_function_return_reg == indirect_branch_thunk_inline
15394 || opts->x_s390_function_return_mem == indirect_branch_thunk_inline)
15395 error ("thunk-inline is only supported with -mindirect-branch-jump");
15397 if (opts->x_s390_indirect_branch != indirect_branch_keep)
15399 if (!opts_set->x_s390_indirect_branch_call)
15400 opts->x_s390_indirect_branch_call = opts->x_s390_indirect_branch;
15402 if (!opts_set->x_s390_indirect_branch_jump)
15403 opts->x_s390_indirect_branch_jump = opts->x_s390_indirect_branch;
15406 if (opts->x_s390_function_return != indirect_branch_keep)
15408 if (!opts_set->x_s390_function_return_reg)
15409 opts->x_s390_function_return_reg = opts->x_s390_function_return;
15411 if (!opts_set->x_s390_function_return_mem)
15412 opts->x_s390_function_return_mem = opts->x_s390_function_return;
15415 if (!TARGET_CPU_ZARCH)
15417 if (opts->x_s390_indirect_branch_call != indirect_branch_keep
15418 || opts->x_s390_indirect_branch_jump != indirect_branch_keep)
15419 error ("-mindirect-branch* options require -march=z900 or higher");
15420 if (opts->x_s390_function_return_reg != indirect_branch_keep
15421 || opts->x_s390_function_return_mem != indirect_branch_keep)
15422 error ("-mfunction-return* options require -march=z900 or higher");
15426 /* Enable hardware transactions if available and not explicitly
15427 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
15428 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
15430 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
15431 opts->x_target_flags |= MASK_OPT_HTM;
15432 else
15433 opts->x_target_flags &= ~MASK_OPT_HTM;
15436 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
15438 if (TARGET_OPT_VX_P (opts->x_target_flags))
15440 if (!TARGET_CPU_VX_P (opts))
15441 error ("hardware vector support not available on %s",
15442 processor_table[(int)opts->x_s390_arch].name);
15443 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
15444 error ("hardware vector support not available with -msoft-float");
15447 else
15449 if (TARGET_CPU_VX_P (opts))
15450 /* Enable vector support if available and not explicitly disabled
15451 by user. E.g. with -m31 -march=z13 -mzarch */
15452 opts->x_target_flags |= MASK_OPT_VX;
15453 else
15454 opts->x_target_flags &= ~MASK_OPT_VX;
15457 /* Use hardware DFP if available and not explicitly disabled by
15458 user. E.g. with -m31 -march=z10 -mzarch */
15459 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
15461 if (TARGET_DFP_P (opts))
15462 opts->x_target_flags |= MASK_HARD_DFP;
15463 else
15464 opts->x_target_flags &= ~MASK_HARD_DFP;
15467 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
15469 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
15471 if (!TARGET_CPU_DFP_P (opts))
15472 error ("hardware decimal floating point instructions"
15473 " not available on %s",
15474 processor_table[(int)opts->x_s390_arch].name);
15475 if (!TARGET_ZARCH_P (opts->x_target_flags))
15476 error ("hardware decimal floating point instructions"
15477 " not available in ESA/390 mode");
15479 else
15480 opts->x_target_flags &= ~MASK_HARD_DFP;
15483 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
15484 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
15486 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
15487 && TARGET_HARD_DFP_P (opts->x_target_flags))
15488 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
15490 opts->x_target_flags &= ~MASK_HARD_DFP;
15493 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
15494 && TARGET_PACKED_STACK_P (opts->x_target_flags)
15495 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
15496 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
15497 "in combination");
15499 if (opts->x_s390_stack_size)
15501 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
15502 error ("stack size must be greater than the stack guard value");
15503 else if (opts->x_s390_stack_size > 1 << 16)
15504 error ("stack size must not be greater than 64k");
15506 else if (opts->x_s390_stack_guard)
15507 error ("-mstack-guard implies use of -mstack-size");
15509 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
15510 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
15511 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
15512 #endif
15514 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
15516 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
15517 opts->x_param_values,
15518 opts_set->x_param_values);
15519 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
15520 opts->x_param_values,
15521 opts_set->x_param_values);
15522 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
15523 opts->x_param_values,
15524 opts_set->x_param_values);
15525 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
15526 opts->x_param_values,
15527 opts_set->x_param_values);
15530 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
15531 opts->x_param_values,
15532 opts_set->x_param_values);
15533 /* values for loop prefetching */
15534 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
15535 opts->x_param_values,
15536 opts_set->x_param_values);
15537 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
15538 opts->x_param_values,
15539 opts_set->x_param_values);
15540 /* s390 has more than 2 cache levels and their sizes are much larger. Since
15541 we are always running virtualized, assume that we only get a small
15542 part of the caches above l1. */
15543 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
15544 opts->x_param_values,
15545 opts_set->x_param_values);
15546 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
15547 opts->x_param_values,
15548 opts_set->x_param_values);
15549 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
15550 opts->x_param_values,
15551 opts_set->x_param_values);
15553 /* Use the alternative scheduling-pressure algorithm by default. */
15554 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
15555 opts->x_param_values,
15556 opts_set->x_param_values);
15558 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
15559 opts->x_param_values,
15560 opts_set->x_param_values);
15562 /* Call target specific restore function to do post-init work. At the moment,
15563 this just sets opts->x_s390_cost_pointer. */
15564 s390_function_specific_restore (opts, NULL);
15567 static void
15568 s390_option_override (void)
15570 unsigned int i;
15571 cl_deferred_option *opt;
15572 vec<cl_deferred_option> *v =
15573 (vec<cl_deferred_option> *) s390_deferred_options;
15575 if (v)
15576 FOR_EACH_VEC_ELT (*v, i, opt)
15578 switch (opt->opt_index)
15580 case OPT_mhotpatch_:
15582 int val1;
15583 int val2;
15584 char *s = strtok (ASTRDUP (opt->arg), ",");
15585 char *t = strtok (NULL, "\0");
15587 if (t != NULL)
15589 val1 = integral_argument (s);
15590 val2 = integral_argument (t);
15592 else
15594 val1 = -1;
15595 val2 = -1;
15597 if (val1 == -1 || val2 == -1)
15599 /* argument is not a plain number */
15600 error ("arguments to %qs should be non-negative integers",
15601 "-mhotpatch=n,m");
15602 break;
15604 else if (val1 > s390_hotpatch_hw_max
15605 || val2 > s390_hotpatch_hw_max)
15607 error ("argument to %qs is too large (max. %d)",
15608 "-mhotpatch=n,m", s390_hotpatch_hw_max);
15609 break;
15611 s390_hotpatch_hw_before_label = val1;
15612 s390_hotpatch_hw_after_label = val2;
15613 break;
15615 default:
15616 gcc_unreachable ();
15620 /* Set up function hooks. */
15621 init_machine_status = s390_init_machine_status;
15623 s390_option_override_internal (true, &global_options, &global_options_set);
15625 /* Save the initial options in case the user does function specific
15626 options. */
15627 target_option_default_node = build_target_option_node (&global_options);
15628 target_option_current_node = target_option_default_node;
15630 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
15631 requires the arch flags to be evaluated already. Since prefetching
15632 is beneficial on s390, we enable it if available. */
15633 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
15634 flag_prefetch_loop_arrays = 1;
15636 if (!s390_pic_data_is_text_relative && !flag_pic)
15637 error ("-mno-pic-data-is-text-relative cannot be used without -fpic/-fPIC");
15639 if (TARGET_TPF)
15641 /* Don't emit DWARF3/4 unless specifically selected. The TPF
15642 debuggers do not yet support DWARF 3/4. */
15643 if (!global_options_set.x_dwarf_strict)
15644 dwarf_strict = 1;
15645 if (!global_options_set.x_dwarf_version)
15646 dwarf_version = 2;
15649 /* Register a target-specific optimization-and-lowering pass
15650 to run immediately before prologue and epilogue generation.
15652 Registering the pass must be done at start up. It's
15653 convenient to do it here. */
15654 opt_pass *new_pass = new pass_s390_early_mach (g);
15655 struct register_pass_info insert_pass_s390_early_mach =
15657 new_pass, /* pass */
15658 "pro_and_epilogue", /* reference_pass_name */
15659 1, /* ref_pass_instance_number */
15660 PASS_POS_INSERT_BEFORE /* po_op */
15662 register_pass (&insert_pass_s390_early_mach);
15665 #if S390_USE_TARGET_ATTRIBUTE
15666 /* Inner function to process the attribute((target(...))), take an argument and
15667 set the current options from the argument. If we have a list, recursively go
15668 over the list. */
15670 static bool
15671 s390_valid_target_attribute_inner_p (tree args,
15672 struct gcc_options *opts,
15673 struct gcc_options *new_opts_set,
15674 bool force_pragma)
15676 char *next_optstr;
15677 bool ret = true;
15679 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
15680 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
15681 static const struct
15683 const char *string;
15684 size_t len;
15685 int opt;
15686 int has_arg;
15687 int only_as_pragma;
15688 } attrs[] = {
15689 /* enum options */
15690 S390_ATTRIB ("arch=", OPT_march_, 1),
15691 S390_ATTRIB ("tune=", OPT_mtune_, 1),
15692 /* uinteger options */
15693 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
15694 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
15695 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
15696 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
15697 /* flag options */
15698 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
15699 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
15700 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
15701 S390_ATTRIB ("htm", OPT_mhtm, 0),
15702 S390_ATTRIB ("vx", OPT_mvx, 0),
15703 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
15704 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
15705 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
15706 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
15707 S390_PRAGMA ("zvector", OPT_mzvector, 0),
15708 /* boolean options */
15709 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
15711 #undef S390_ATTRIB
15712 #undef S390_PRAGMA
15714 /* If this is a list, recurse to get the options. */
15715 if (TREE_CODE (args) == TREE_LIST)
15717 bool ret = true;
15718 int num_pragma_values;
15719 int i;
15721 /* Note: attribs.c:decl_attributes prepends the values from
15722 current_target_pragma to the list of target attributes. To determine
15723 whether we're looking at a value of the attribute or the pragma we
15724 assume that the first [list_length (current_target_pragma)] values in
15725 the list are the values from the pragma. */
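/* Illustrative sketch (not part of the original comment; hypothetical
   declarations): with

     #pragma GCC target ("vx")
     int foo (void) __attribute__ ((target ("htm")));

   decl_attributes presents the combined list ("vx", "htm"), and since
   list_length (current_target_pragma) == 1 only the first entry is
   treated as coming from the pragma.  */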
15726 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
15727 ? list_length (current_target_pragma) : 0;
15728 for (i = 0; args; args = TREE_CHAIN (args), i++)
15730 bool is_pragma;
15732 is_pragma = (force_pragma || i < num_pragma_values);
15733 if (TREE_VALUE (args)
15734 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
15735 opts, new_opts_set,
15736 is_pragma))
15738 ret = false;
15741 return ret;
15744 else if (TREE_CODE (args) != STRING_CST)
15746 error ("attribute %<target%> argument not a string");
15747 return false;
15750 /* Handle multiple arguments separated by commas. */
15751 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
15753 while (next_optstr && *next_optstr != '\0')
15755 char *p = next_optstr;
15756 char *orig_p = p;
15757 char *comma = strchr (next_optstr, ',');
15758 size_t len, opt_len;
15759 int opt;
15760 bool opt_set_p;
15761 char ch;
15762 unsigned i;
15763 int mask = 0;
15764 enum cl_var_type var_type;
15765 bool found;
15767 if (comma)
15769 *comma = '\0';
15770 len = comma - next_optstr;
15771 next_optstr = comma + 1;
15773 else
15775 len = strlen (p);
15776 next_optstr = NULL;
15779 /* Recognize no-xxx. */
15780 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
15782 opt_set_p = false;
15783 p += 3;
15784 len -= 3;
15786 else
15787 opt_set_p = true;
15789 /* Find the option. */
15790 ch = *p;
15791 found = false;
15792 for (i = 0; i < ARRAY_SIZE (attrs); i++)
15794 opt_len = attrs[i].len;
15795 if (ch == attrs[i].string[0]
15796 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
15797 && memcmp (p, attrs[i].string, opt_len) == 0)
15799 opt = attrs[i].opt;
15800 if (!opt_set_p && cl_options[opt].cl_reject_negative)
15801 continue;
15802 mask = cl_options[opt].var_value;
15803 var_type = cl_options[opt].var_type;
15804 found = true;
15805 break;
15809 /* Process the option. */
15810 if (!found)
15812 error ("attribute(target(\"%s\")) is unknown", orig_p);
15813 return false;
15815 else if (attrs[i].only_as_pragma && !force_pragma)
15817 /* Value is not allowed for the target attribute. */
15818 error ("value %qs is not supported by attribute %<target%>",
15819 attrs[i].string);
15820 return false;
15823 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
15825 if (var_type == CLVC_BIT_CLEAR)
15826 opt_set_p = !opt_set_p;
15828 if (opt_set_p)
15829 opts->x_target_flags |= mask;
15830 else
15831 opts->x_target_flags &= ~mask;
15832 new_opts_set->x_target_flags |= mask;
15835 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
15837 int value;
15839 if (cl_options[opt].cl_uinteger)
15841 /* Unsigned integer argument. Code based on the function
15842 decode_cmdline_option () in opts-common.c. */
15843 value = integral_argument (p + opt_len);
15845 else
15846 value = (opt_set_p) ? 1 : 0;
15848 if (value != -1)
15850 struct cl_decoded_option decoded;
15852 /* Value range check; only implemented for numeric and boolean
15853 options at the moment. */
15854 generate_option (opt, NULL, value, CL_TARGET, &decoded);
15855 s390_handle_option (opts, new_opts_set, &decoded, input_location);
15856 set_option (opts, new_opts_set, opt, value,
15857 p + opt_len, DK_UNSPECIFIED, input_location,
15858 global_dc);
15860 else
15862 error ("attribute(target(\"%s\")) is unknown", orig_p);
15863 ret = false;
15867 else if (cl_options[opt].var_type == CLVC_ENUM)
15869 bool arg_ok;
15870 int value;
15872 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
15873 if (arg_ok)
15874 set_option (opts, new_opts_set, opt, value,
15875 p + opt_len, DK_UNSPECIFIED, input_location,
15876 global_dc);
15877 else
15879 error ("attribute(target(\"%s\")) is unknown", orig_p);
15880 ret = false;
15884 else
15885 gcc_unreachable ();
15887 return ret;
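/* Usage sketch (hypothetical declarations, not from the original sources):
   the strings accepted above mirror the -m options listed in the attrs[]
   table, comma separated and optionally negated with "no-", e.g.

     void f (void) __attribute__ ((target ("arch=z13,no-vx")));
     void g (void) __attribute__ ((target ("branch-cost=2")));
*/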
15890 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15892 tree
15893 s390_valid_target_attribute_tree (tree args,
15894 struct gcc_options *opts,
15895 const struct gcc_options *opts_set,
15896 bool force_pragma)
15898 tree t = NULL_TREE;
15899 struct gcc_options new_opts_set;
15901 memset (&new_opts_set, 0, sizeof (new_opts_set));
15903 /* Process each of the options on the chain. */
15904 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15905 force_pragma))
15906 return error_mark_node;
15908 /* If some option was set (even if it has not changed), rerun
15909 s390_option_override_internal, and then save the options away. */
15910 if (new_opts_set.x_target_flags
15911 || new_opts_set.x_s390_arch
15912 || new_opts_set.x_s390_tune
15913 || new_opts_set.x_s390_stack_guard
15914 || new_opts_set.x_s390_stack_size
15915 || new_opts_set.x_s390_branch_cost
15916 || new_opts_set.x_s390_warn_framesize
15917 || new_opts_set.x_s390_warn_dynamicstack_p)
15919 const unsigned char *src = (const unsigned char *)opts_set;
15920 unsigned char *dest = (unsigned char *)&new_opts_set;
15921 unsigned int i;
15923 /* Merge the original option flags into the new ones. */
15924 for (i = 0; i < sizeof(*opts_set); i++)
15925 dest[i] |= src[i];
15927 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
15928 s390_option_override_internal (false, opts, &new_opts_set);
15929 /* Save the current options unless we are validating options for
15930 #pragma. */
15931 t = build_target_option_node (opts);
15933 return t;
15936 /* Hook to validate attribute((target("string"))). */
15938 static bool
15939 s390_valid_target_attribute_p (tree fndecl,
15940 tree ARG_UNUSED (name),
15941 tree args,
15942 int ARG_UNUSED (flags))
15944 struct gcc_options func_options;
15945 tree new_target, new_optimize;
15946 bool ret = true;
15948 /* attribute((target("default"))) does nothing, beyond
15949 affecting multi-versioning. */
15950 if (TREE_VALUE (args)
15951 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15952 && TREE_CHAIN (args) == NULL_TREE
15953 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15954 return true;
15956 tree old_optimize = build_optimization_node (&global_options);
15958 /* Get the optimization options of the current function. */
15959 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15961 if (!func_optimize)
15962 func_optimize = old_optimize;
15964 /* Init func_options. */
15965 memset (&func_options, 0, sizeof (func_options));
15966 init_options_struct (&func_options, NULL);
15967 lang_hooks.init_options_struct (&func_options);
15969 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15971 /* Initialize func_options to the default before its target options can
15972 be set. */
15973 cl_target_option_restore (&func_options,
15974 TREE_TARGET_OPTION (target_option_default_node));
15976 new_target = s390_valid_target_attribute_tree (args, &func_options,
15977 &global_options_set,
15978 (args ==
15979 current_target_pragma));
15980 new_optimize = build_optimization_node (&func_options);
15981 if (new_target == error_mark_node)
15982 ret = false;
15983 else if (fndecl && new_target)
15985 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15986 if (old_optimize != new_optimize)
15987 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
15989 return ret;
15992 /* Hook to determine if one function can safely inline another. */
15994 static bool
15995 s390_can_inline_p (tree caller, tree callee)
15997 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
15998 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
16000 if (!callee_tree)
16001 callee_tree = target_option_default_node;
16002 if (!caller_tree)
16003 caller_tree = target_option_default_node;
16004 if (callee_tree == caller_tree)
16005 return true;
16007 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
16008 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
16009 bool ret = true;
16011 if ((caller_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP))
16012 != (callee_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP)))
16013 ret = false;
16015 /* Don't inline functions to be compiled for a more recent arch into a
16016 function for an older arch. */
16017 else if (caller_opts->x_s390_arch < callee_opts->x_s390_arch)
16018 ret = false;
16020 /* Inlining a hard float function into a soft float function is only
16021 allowed if the hard float function doesn't actually make use of
16022 floating point.
16024 We are called from FEs for multi-versioning call optimization, so
16025 beware of ipa_fn_summaries not being available. */
16026 else if (((TARGET_SOFT_FLOAT_P (caller_opts->x_target_flags)
16027 && !TARGET_SOFT_FLOAT_P (callee_opts->x_target_flags))
16028 || (!TARGET_HARD_DFP_P (caller_opts->x_target_flags)
16029 && TARGET_HARD_DFP_P (callee_opts->x_target_flags)))
16030 && (! ipa_fn_summaries
16031 || ipa_fn_summaries->get
16032 (cgraph_node::get (callee))->fp_expressions))
16033 ret = false;
16035 return ret;
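/* For example (sketch, assuming the caller is built with -march=z13): a
   callee declared __attribute__ ((target ("arch=z14"))) is not inlined,
   since its arch is newer than the caller's; likewise any difference in
   target_flags other than soft-float/hard-dfp blocks inlining, while a
   hard float callee may still be inlined into a soft float caller if it
   contains no floating point expressions.  */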
16038 /* Set VAL to correct enum value according to the indirect-branch or
16039 function-return attribute in ATTR. */
16041 static inline void
16042 s390_indirect_branch_attrvalue (tree attr, enum indirect_branch *val)
16044 const char *str = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr)));
16045 if (strcmp (str, "keep") == 0)
16046 *val = indirect_branch_keep;
16047 else if (strcmp (str, "thunk") == 0)
16048 *val = indirect_branch_thunk;
16049 else if (strcmp (str, "thunk-inline") == 0)
16050 *val = indirect_branch_thunk_inline;
16051 else if (strcmp (str, "thunk-extern") == 0)
16052 *val = indirect_branch_thunk_extern;
16055 /* Memorize the setting for -mindirect-branch* and -mfunction-return*
16056 from either the cmdline or the function attributes in
16057 cfun->machine. */
16059 static void
16060 s390_indirect_branch_settings (tree fndecl)
16062 tree attr;
16064 if (!fndecl)
16065 return;
16067 /* Initialize with the cmdline options and let the attributes
16068 override it. */
16069 cfun->machine->indirect_branch_jump = s390_indirect_branch_jump;
16070 cfun->machine->indirect_branch_call = s390_indirect_branch_call;
16072 cfun->machine->function_return_reg = s390_function_return_reg;
16073 cfun->machine->function_return_mem = s390_function_return_mem;
16075 if ((attr = lookup_attribute ("indirect_branch",
16076 DECL_ATTRIBUTES (fndecl))))
16078 s390_indirect_branch_attrvalue (attr,
16079 &cfun->machine->indirect_branch_jump);
16080 s390_indirect_branch_attrvalue (attr,
16081 &cfun->machine->indirect_branch_call);
16084 if ((attr = lookup_attribute ("indirect_branch_jump",
16085 DECL_ATTRIBUTES (fndecl))))
16086 s390_indirect_branch_attrvalue (attr, &cfun->machine->indirect_branch_jump);
16088 if ((attr = lookup_attribute ("indirect_branch_call",
16089 DECL_ATTRIBUTES (fndecl))))
16090 s390_indirect_branch_attrvalue (attr, &cfun->machine->indirect_branch_call);
16092 if ((attr = lookup_attribute ("function_return",
16093 DECL_ATTRIBUTES (fndecl))))
16095 s390_indirect_branch_attrvalue (attr,
16096 &cfun->machine->function_return_reg);
16097 s390_indirect_branch_attrvalue (attr,
16098 &cfun->machine->function_return_mem);
16101 if ((attr = lookup_attribute ("function_return_reg",
16102 DECL_ATTRIBUTES (fndecl))))
16103 s390_indirect_branch_attrvalue (attr, &cfun->machine->function_return_reg);
16105 if ((attr = lookup_attribute ("function_return_mem",
16106 DECL_ATTRIBUTES (fndecl))))
16107 s390_indirect_branch_attrvalue (attr, &cfun->machine->function_return_mem);
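/* Usage sketch (hypothetical declarations): the attributes override the
   command-line settings per function, e.g.

     void __attribute__ ((indirect_branch ("thunk"))) f (void);
     void __attribute__ ((function_return ("keep"))) g (void);

   while options such as -mindirect-branch=thunk provide the defaults
   copied into cfun->machine above.  */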
16110 /* Restore target globals from NEW_TREE and invalidate the s390_previous_fndecl
16111 cache. */
16113 void
16114 s390_activate_target_options (tree new_tree)
16116 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
16117 if (TREE_TARGET_GLOBALS (new_tree))
16118 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
16119 else if (new_tree == target_option_default_node)
16120 restore_target_globals (&default_target_globals);
16121 else
16122 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
16123 s390_previous_fndecl = NULL_TREE;
16126 /* Establish appropriate back-end context for processing the function
16127 FNDECL. The argument might be NULL to indicate processing at top
16128 level, outside of any function scope. */
16129 static void
16130 s390_set_current_function (tree fndecl)
16132 /* Only change the context if the function changes. This hook is called
16133 several times in the course of compiling a function, and we don't want to
16134 slow things down too much or call target_reinit when it isn't safe. */
16135 if (fndecl == s390_previous_fndecl)
16137 s390_indirect_branch_settings (fndecl);
16138 return;
16141 tree old_tree;
16142 if (s390_previous_fndecl == NULL_TREE)
16143 old_tree = target_option_current_node;
16144 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
16145 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
16146 else
16147 old_tree = target_option_default_node;
16149 if (fndecl == NULL_TREE)
16151 if (old_tree != target_option_current_node)
16152 s390_activate_target_options (target_option_current_node);
16153 return;
16156 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
16157 if (new_tree == NULL_TREE)
16158 new_tree = target_option_default_node;
16160 if (old_tree != new_tree)
16161 s390_activate_target_options (new_tree);
16162 s390_previous_fndecl = fndecl;
16164 s390_indirect_branch_settings (fndecl);
16166 #endif
16168 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
16170 static bool
16171 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
16172 unsigned int align ATTRIBUTE_UNUSED,
16173 enum by_pieces_operation op ATTRIBUTE_UNUSED,
16174 bool speed_p ATTRIBUTE_UNUSED)
16176 return (size == 1 || size == 2
16177 || size == 4 || (TARGET_ZARCH && size == 8));
16180 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
16182 static void
16183 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
16185 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
16186 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
16187 tree call_efpc = build_call_expr (efpc, 0);
16188 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
16190 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
16191 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
16192 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
16193 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
16194 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
16195 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
16197 /* Generates the equivalent of feholdexcept (&fenv_var)
16199 fenv_var = __builtin_s390_efpc ();
16200 __builtin_s390_sfpc (fenv_var & mask) */
16201 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
16202 tree new_fpc =
16203 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
16204 build_int_cst (unsigned_type_node,
16205 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
16206 FPC_EXCEPTION_MASK)));
16207 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
16208 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
16210 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
16212 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
16213 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
16214 build_int_cst (unsigned_type_node,
16215 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
16216 *clear = build_call_expr (sfpc, 1, new_fpc);
16218 /* Generates the equivalent of feupdateenv (fenv_var)
16220 old_fpc = __builtin_s390_efpc ();
16221 __builtin_s390_sfpc (fenv_var);
16222 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
16224 old_fpc = create_tmp_var_raw (unsigned_type_node);
16225 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
16226 old_fpc, call_efpc);
16228 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
16230 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
16231 build_int_cst (unsigned_type_node,
16232 FPC_FLAGS_MASK));
16233 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
16234 build_int_cst (unsigned_type_node,
16235 FPC_FLAGS_SHIFT));
16236 tree atomic_feraiseexcept
16237 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
16238 raise_old_except = build_call_expr (atomic_feraiseexcept,
16239 1, raise_old_except);
16241 *update = build2 (COMPOUND_EXPR, void_type_node,
16242 build2 (COMPOUND_EXPR, void_type_node,
16243 store_old_fpc, set_new_fpc),
16244 raise_old_except);
16246 #undef FPC_EXCEPTION_MASK
16247 #undef FPC_FLAGS_MASK
16248 #undef FPC_DXC_MASK
16249 #undef FPC_EXCEPTION_MASK_SHIFT
16250 #undef FPC_FLAGS_SHIFT
16251 #undef FPC_DXC_SHIFT
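/* The three sequences built above correspond to feholdexcept (&fenv_var),
   feclearexcept (FE_ALL_EXCEPT) and feupdateenv (&fenv_var); the middle
   end emits them around the compare-and-swap loop of a C11 atomic
   floating point compound assignment so that only the exceptions of the
   successfully stored result are raised (sketch of the generic
   TARGET_ATOMIC_ASSIGN_EXPAND_FENV contract).  */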
16254 /* Return the vector mode to be used for inner mode MODE when doing
16255 vectorization. */
16256 static machine_mode
16257 s390_preferred_simd_mode (scalar_mode mode)
16259 if (TARGET_VXE)
16260 switch (mode)
16262 case E_SFmode:
16263 return V4SFmode;
16264 default:;
16267 if (TARGET_VX)
16268 switch (mode)
16270 case E_DFmode:
16271 return V2DFmode;
16272 case E_DImode:
16273 return V2DImode;
16274 case E_SImode:
16275 return V4SImode;
16276 case E_HImode:
16277 return V8HImode;
16278 case E_QImode:
16279 return V16QImode;
16280 default:;
16282 return word_mode;
16285 /* Our hardware does not require vectors to be strictly aligned. */
16286 static bool
16287 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
16288 const_tree type ATTRIBUTE_UNUSED,
16289 int misalignment ATTRIBUTE_UNUSED,
16290 bool is_packed ATTRIBUTE_UNUSED)
16292 if (TARGET_VX)
16293 return true;
16295 return default_builtin_support_vector_misalignment (mode, type, misalignment,
16296 is_packed);
16299 /* The vector ABI requires vector types to be aligned on an 8 byte
16300 boundary (our stack alignment). However, we allow the user to
16301 override this, although doing so definitely breaks the ABI. */
16302 static HOST_WIDE_INT
16303 s390_vector_alignment (const_tree type)
16305 if (!TARGET_VX_ABI)
16306 return default_vector_alignment (type);
16308 if (TYPE_USER_ALIGN (type))
16309 return TYPE_ALIGN (type);
16311 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
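/* For example (sketch): under the vector ABI a 16 byte "vector signed int"
   is limited to 64 bit (8 byte) alignment here, matching the stack
   alignment, whereas default_vector_alignment would request its full
   128 bit size; an explicit user alignment is still honoured via
   TYPE_USER_ALIGN even though it breaks the ABI.  */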
16314 /* Implement TARGET_CONSTANT_ALIGNMENT. Alignment on even addresses for
16315 LARL instruction. */
16317 static HOST_WIDE_INT
16318 s390_constant_alignment (const_tree, HOST_WIDE_INT align)
16320 return MAX (align, 16);
16323 #ifdef HAVE_AS_MACHINE_MACHINEMODE
16324 /* Implement TARGET_ASM_FILE_START. */
16325 static void
16326 s390_asm_file_start (void)
16328 default_file_start ();
16329 s390_asm_output_machine_for_arch (asm_out_file);
16331 #endif
16333 /* Implement TARGET_ASM_FILE_END. */
16334 static void
16335 s390_asm_file_end (void)
16337 #ifdef HAVE_AS_GNU_ATTRIBUTE
16338 varpool_node *vnode;
16339 cgraph_node *cnode;
16341 FOR_EACH_VARIABLE (vnode)
16342 if (TREE_PUBLIC (vnode->decl))
16343 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
16345 FOR_EACH_FUNCTION (cnode)
16346 if (TREE_PUBLIC (cnode->decl))
16347 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
16350 if (s390_vector_abi != 0)
16351 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
16352 s390_vector_abi);
16353 #endif
16354 file_end_indicate_exec_stack ();
16356 if (flag_split_stack)
16357 file_end_indicate_split_stack ();
16360 /* Return true if TYPE is a vector bool type. */
16361 static inline bool
16362 s390_vector_bool_type_p (const_tree type)
16364 return TYPE_VECTOR_OPAQUE (type);
16367 /* Return the diagnostic message string if the binary operation OP is
16368 not permitted on TYPE1 and TYPE2, NULL otherwise. */
16369 static const char*
16370 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
16372 bool bool1_p, bool2_p;
16373 bool plusminus_p;
16374 bool muldiv_p;
16375 bool compare_p;
16376 machine_mode mode1, mode2;
16378 if (!TARGET_ZVECTOR)
16379 return NULL;
16381 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
16382 return NULL;
16384 bool1_p = s390_vector_bool_type_p (type1);
16385 bool2_p = s390_vector_bool_type_p (type2);
16387 /* Mixing signed and unsigned types is forbidden for all
16388 operators. */
16389 if (!bool1_p && !bool2_p
16390 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
16391 return N_("types differ in signedness");
16393 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
16394 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
16395 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
16396 || op == ROUND_DIV_EXPR);
16397 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
16398 || op == EQ_EXPR || op == NE_EXPR);
16400 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
16401 return N_("binary operator does not support two vector bool operands");
16403 if (bool1_p != bool2_p && (muldiv_p || compare_p))
16404 return N_("binary operator does not support vector bool operand");
16406 mode1 = TYPE_MODE (type1);
16407 mode2 = TYPE_MODE (type2);
16409 if (bool1_p != bool2_p && plusminus_p
16410 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
16411 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
16412 return N_("binary operator does not support mixing vector "
16413 "bool with floating point vector operands");
16415 return NULL;
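/* Example diagnostics under -mzvector (sketch, hypothetical declarations):

     vector signed int a;    vector unsigned int b;
     vector bool int c, d;

     a + b;   // "types differ in signedness"
     c + d;   // "binary operator does not support two vector bool operands"
     a * c;   // "binary operator does not support vector bool operand"
*/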
16418 /* Implement TARGET_C_EXCESS_PRECISION.
16420 FIXME: For historical reasons, float_t and double_t are typedef'ed to
16421 double on s390, causing operations on float_t to operate in a higher
16422 precision than is necessary. However, it is not the case that SFmode
16423 operations have implicit excess precision, and we generate better
16424 code if we let the compiler know that no implicit extra precision is added.
16426 That means when we are compiling with -fexcess-precision=fast, the value
16427 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
16428 float_t (though they would be correct for -fexcess-precision=standard).
16430 A complete fix would modify glibc to remove the unnecessary typedef
16431 of float_t to double. */
16433 static enum flt_eval_method
16434 s390_excess_precision (enum excess_precision_type type)
16436 switch (type)
16438 case EXCESS_PRECISION_TYPE_IMPLICIT:
16439 case EXCESS_PRECISION_TYPE_FAST:
16440 /* The fastest type to promote to will always be the native type,
16441 whether that occurs with implicit excess precision or
16442 otherwise. */
16443 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
16444 case EXCESS_PRECISION_TYPE_STANDARD:
16445 /* Otherwise, when we are in a standards compliant mode, to
16446 ensure consistency with the implementation in glibc, report that
16447 float is evaluated to the range and precision of double. */
16448 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
16449 default:
16450 gcc_unreachable ();
16452 return FLT_EVAL_METHOD_UNPREDICTABLE;
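/* In practice (sketch, hypothetical function): for

     float f (float a, float b) { return a * b; }

   -fexcess-precision=standard evaluates the multiply in double precision
   (FLT_EVAL_METHOD == 1, matching glibc's double float_t), while
   -fexcess-precision=fast keeps it in single precision even though
   float_t is still typedef'ed to double, which is the inconsistency the
   FIXME above describes.  */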
16455 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
16457 static unsigned HOST_WIDE_INT
16458 s390_asan_shadow_offset (void)
16460 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
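/* The address sanitizer computes the shadow byte address roughly as
   shadow = (addr >> 3) + offset (the usual ASan 1:8 mapping; the exact
   scheme is defined by libasan), so this places the shadow at 1 << 52
   for 64 bit and at 0x20000000 for 31 bit code.  */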
16463 #ifdef HAVE_GAS_HIDDEN
16464 # define USE_HIDDEN_LINKONCE 1
16465 #else
16466 # define USE_HIDDEN_LINKONCE 0
16467 #endif
16469 /* Output an indirect branch trampoline for target register REGNO. */
16471 static void
16472 s390_output_indirect_thunk_function (unsigned int regno, bool z10_p)
16474 tree decl;
16475 char thunk_label[32];
16476 int i;
16478 if (z10_p)
16479 sprintf (thunk_label, TARGET_INDIRECT_BRANCH_THUNK_NAME_EXRL, regno);
16480 else
16481 sprintf (thunk_label, TARGET_INDIRECT_BRANCH_THUNK_NAME_EX,
16482 INDIRECT_BRANCH_THUNK_REGNUM, regno);
16484 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
16485 get_identifier (thunk_label),
16486 build_function_type_list (void_type_node, NULL_TREE));
16487 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
16488 NULL_TREE, void_type_node);
16489 TREE_PUBLIC (decl) = 1;
16490 TREE_STATIC (decl) = 1;
16491 DECL_IGNORED_P (decl) = 1;
16493 if (USE_HIDDEN_LINKONCE)
16495 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
16497 targetm.asm_out.unique_section (decl, 0);
16498 switch_to_section (get_named_section (decl, NULL, 0));
16500 targetm.asm_out.globalize_label (asm_out_file, thunk_label);
16501 fputs ("\t.hidden\t", asm_out_file);
16502 assemble_name (asm_out_file, thunk_label);
16503 putc ('\n', asm_out_file);
16504 ASM_DECLARE_FUNCTION_NAME (asm_out_file, thunk_label, decl);
16506 else
16508 switch_to_section (text_section);
16509 ASM_OUTPUT_LABEL (asm_out_file, thunk_label);
16512 DECL_INITIAL (decl) = make_node (BLOCK);
16513 current_function_decl = decl;
16514 allocate_struct_function (decl, false);
16515 init_function_start (decl);
16516 cfun->is_thunk = true;
16517 first_function_block_is_cold = false;
16518 final_start_function (emit_barrier (), asm_out_file, 1);
16520 /* This makes CFI at least usable for indirect jumps.
16522 Stopping in the thunk: backtrace will point to the thunk target
16523 as if it was interrupted by a signal. For a call this means that
16524 the call chain will be: caller->callee->thunk */
16525 if (flag_asynchronous_unwind_tables && flag_dwarf2_cfi_asm)
16527 fputs ("\t.cfi_signal_frame\n", asm_out_file);
16528 fprintf (asm_out_file, "\t.cfi_return_column %d\n", regno);
16529 for (i = 0; i < FPR15_REGNUM; i++)
16530 fprintf (asm_out_file, "\t.cfi_same_value %s\n", reg_names[i]);
16533 if (z10_p)
16535 /* exrl 0,1f */
16537 /* We generate a thunk using z10 instructions although z10 is
16538 currently not the selected architecture. Tell the assembler to
16539 accept the instruction. */
16540 if (!TARGET_CPU_Z10)
16542 fputs ("\t.machine push\n", asm_out_file);
16543 fputs ("\t.machine z10\n", asm_out_file);
16545 /* We use exrl even if -mzarch hasn't been specified on the
16546 command line so we have to tell the assembler to accept
16547 it. */
16548 if (!TARGET_ZARCH)
16549 fputs ("\t.machinemode zarch\n", asm_out_file);
16551 fputs ("\texrl\t0,1f\n", asm_out_file);
16553 if (!TARGET_ZARCH)
16554 fputs ("\t.machinemode esa\n", asm_out_file);
16556 if (!TARGET_CPU_Z10)
16557 fputs ("\t.machine pop\n", asm_out_file);
16559 else if (TARGET_CPU_ZARCH)
16561 /* larl %r1,1f */
16562 fprintf (asm_out_file, "\tlarl\t%%r%d,1f\n",
16563 INDIRECT_BRANCH_THUNK_REGNUM);
16565 /* ex 0,0(%r1) */
16566 fprintf (asm_out_file, "\tex\t0,0(%%r%d)\n",
16567 INDIRECT_BRANCH_THUNK_REGNUM);
16569 else
16570 gcc_unreachable ();
16572 /* 0: j 0b */
16573 fputs ("0:\tj\t0b\n", asm_out_file);
16575 /* 1: br <regno> */
16576 fprintf (asm_out_file, "1:\tbr\t%%r%d\n", regno);
16578 final_end_function ();
16579 init_insn_lengths ();
16580 free_after_compilation (cfun);
16581 set_cfun (NULL);
16582 current_function_decl = NULL;
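/* For example, the z10 style thunk for %r1 comes out roughly as (sketch):

	exrl	0,1f
   0:	j	0b
   1:	br	%r1

   The br is only ever executed out of line via the execute instruction,
   and a mis-speculated fall through is caught by the 0b loop; the
   pre-z10 variant uses larl/ex instead of exrl.  */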
16585 /* Implement the asm.code_end target hook. */
16587 static void
16588 s390_code_end (void)
16590 int i;
16592 for (i = 1; i < 16; i++)
16594 if (indirect_branch_z10thunk_mask & (1 << i))
16595 s390_output_indirect_thunk_function (i, true);
16597 if (indirect_branch_prez10thunk_mask & (1 << i))
16598 s390_output_indirect_thunk_function (i, false);
16601 if (TARGET_INDIRECT_BRANCH_TABLE)
16603 int o;
16604 int i;
16606 for (o = 0; o < INDIRECT_BRANCH_NUM_OPTIONS; o++)
16608 if (indirect_branch_table_label_no[o] == 0)
16609 continue;
16611 switch_to_section (get_section (indirect_branch_table_name[o],
16613 NULL_TREE));
16614 for (i = 0; i < indirect_branch_table_label_no[o]; i++)
16616 char label_start[32];
16618 ASM_GENERATE_INTERNAL_LABEL (label_start,
16619 indirect_branch_table_label[o], i);
16621 fputs ("\t.long\t", asm_out_file);
16622 assemble_name_raw (asm_out_file, label_start);
16623 fputs ("-.\n", asm_out_file);
16625 switch_to_section (current_function_section ());
16630 /* Implement the TARGET_CASE_VALUES_THRESHOLD target hook. */
16632 unsigned int
16633 s390_case_values_threshold (void)
16635 /* Disabling branch prediction for indirect jumps makes jump tables
16636 much more expensive. */
16637 if (TARGET_INDIRECT_BRANCH_NOBP_JUMP)
16638 return 20;
16640 return default_case_values_threshold ();
16643 /* Initialize GCC target structure. */
16645 #undef TARGET_ASM_ALIGNED_HI_OP
16646 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
16647 #undef TARGET_ASM_ALIGNED_DI_OP
16648 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
16649 #undef TARGET_ASM_INTEGER
16650 #define TARGET_ASM_INTEGER s390_assemble_integer
16652 #undef TARGET_ASM_OPEN_PAREN
16653 #define TARGET_ASM_OPEN_PAREN ""
16655 #undef TARGET_ASM_CLOSE_PAREN
16656 #define TARGET_ASM_CLOSE_PAREN ""
16658 #undef TARGET_OPTION_OVERRIDE
16659 #define TARGET_OPTION_OVERRIDE s390_option_override
16661 #ifdef TARGET_THREAD_SSP_OFFSET
16662 #undef TARGET_STACK_PROTECT_GUARD
16663 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
16664 #endif
16666 #undef TARGET_ENCODE_SECTION_INFO
16667 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
16669 #undef TARGET_SCALAR_MODE_SUPPORTED_P
16670 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
16672 #ifdef HAVE_AS_TLS
16673 #undef TARGET_HAVE_TLS
16674 #define TARGET_HAVE_TLS true
16675 #endif
16676 #undef TARGET_CANNOT_FORCE_CONST_MEM
16677 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
16679 #undef TARGET_DELEGITIMIZE_ADDRESS
16680 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
16682 #undef TARGET_LEGITIMIZE_ADDRESS
16683 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
16685 #undef TARGET_RETURN_IN_MEMORY
16686 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
16688 #undef TARGET_INIT_BUILTINS
16689 #define TARGET_INIT_BUILTINS s390_init_builtins
16690 #undef TARGET_EXPAND_BUILTIN
16691 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
16692 #undef TARGET_BUILTIN_DECL
16693 #define TARGET_BUILTIN_DECL s390_builtin_decl
16695 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
16696 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
16698 #undef TARGET_ASM_OUTPUT_MI_THUNK
16699 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
16700 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
16701 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
16703 #undef TARGET_C_EXCESS_PRECISION
16704 #define TARGET_C_EXCESS_PRECISION s390_excess_precision
16706 #undef TARGET_SCHED_ADJUST_PRIORITY
16707 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
16708 #undef TARGET_SCHED_ISSUE_RATE
16709 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
16710 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
16711 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
16713 #undef TARGET_SCHED_VARIABLE_ISSUE
16714 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
16715 #undef TARGET_SCHED_REORDER
16716 #define TARGET_SCHED_REORDER s390_sched_reorder
16717 #undef TARGET_SCHED_INIT
16718 #define TARGET_SCHED_INIT s390_sched_init
16720 #undef TARGET_CANNOT_COPY_INSN_P
16721 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
16722 #undef TARGET_RTX_COSTS
16723 #define TARGET_RTX_COSTS s390_rtx_costs
16724 #undef TARGET_ADDRESS_COST
16725 #define TARGET_ADDRESS_COST s390_address_cost
16726 #undef TARGET_REGISTER_MOVE_COST
16727 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
16728 #undef TARGET_MEMORY_MOVE_COST
16729 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
16730 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
16731 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
16732 s390_builtin_vectorization_cost
16734 #undef TARGET_MACHINE_DEPENDENT_REORG
16735 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
16737 #undef TARGET_VALID_POINTER_MODE
16738 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
16740 #undef TARGET_BUILD_BUILTIN_VA_LIST
16741 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
16742 #undef TARGET_EXPAND_BUILTIN_VA_START
16743 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
16744 #undef TARGET_ASAN_SHADOW_OFFSET
16745 #define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
16746 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
16747 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
16749 #undef TARGET_PROMOTE_FUNCTION_MODE
16750 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
16751 #undef TARGET_PASS_BY_REFERENCE
16752 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
16754 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
16755 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
16756 #undef TARGET_FUNCTION_ARG
16757 #define TARGET_FUNCTION_ARG s390_function_arg
16758 #undef TARGET_FUNCTION_ARG_ADVANCE
16759 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
16760 #undef TARGET_FUNCTION_ARG_PADDING
16761 #define TARGET_FUNCTION_ARG_PADDING s390_function_arg_padding
16762 #undef TARGET_FUNCTION_VALUE
16763 #define TARGET_FUNCTION_VALUE s390_function_value
16764 #undef TARGET_LIBCALL_VALUE
16765 #define TARGET_LIBCALL_VALUE s390_libcall_value
16766 #undef TARGET_STRICT_ARGUMENT_NAMING
16767 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
16769 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
16770 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
16772 #undef TARGET_FIXED_CONDITION_CODE_REGS
16773 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
16775 #undef TARGET_CC_MODES_COMPATIBLE
16776 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
16778 #undef TARGET_INVALID_WITHIN_DOLOOP
16779 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
16781 #ifdef HAVE_AS_TLS
16782 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
16783 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
16784 #endif
16786 #undef TARGET_DWARF_FRAME_REG_MODE
16787 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
16789 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
16790 #undef TARGET_MANGLE_TYPE
16791 #define TARGET_MANGLE_TYPE s390_mangle_type
16792 #endif
16794 #undef TARGET_SCALAR_MODE_SUPPORTED_P
16795 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
16797 #undef TARGET_VECTOR_MODE_SUPPORTED_P
16798 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
16800 #undef TARGET_PREFERRED_RELOAD_CLASS
16801 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
16803 #undef TARGET_SECONDARY_RELOAD
16804 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
16805 #undef TARGET_SECONDARY_MEMORY_NEEDED
16806 #define TARGET_SECONDARY_MEMORY_NEEDED s390_secondary_memory_needed
16807 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
16808 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE s390_secondary_memory_needed_mode
16810 #undef TARGET_LIBGCC_CMP_RETURN_MODE
16811 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
16813 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
16814 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
16816 #undef TARGET_LEGITIMATE_ADDRESS_P
16817 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
16819 #undef TARGET_LEGITIMATE_CONSTANT_P
16820 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
16822 #undef TARGET_LRA_P
16823 #define TARGET_LRA_P s390_lra_p
16825 #undef TARGET_CAN_ELIMINATE
16826 #define TARGET_CAN_ELIMINATE s390_can_eliminate
16828 #undef TARGET_CONDITIONAL_REGISTER_USAGE
16829 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
16831 #undef TARGET_LOOP_UNROLL_ADJUST
16832 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
16834 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
16835 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
16836 #undef TARGET_TRAMPOLINE_INIT
16837 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
16839 /* PR 79421 */
16840 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
16841 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
16843 #undef TARGET_UNWIND_WORD_MODE
16844 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
16846 #undef TARGET_CANONICALIZE_COMPARISON
16847 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
16849 #undef TARGET_HARD_REGNO_SCRATCH_OK
16850 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
16852 #undef TARGET_HARD_REGNO_NREGS
16853 #define TARGET_HARD_REGNO_NREGS s390_hard_regno_nregs
16854 #undef TARGET_HARD_REGNO_MODE_OK
16855 #define TARGET_HARD_REGNO_MODE_OK s390_hard_regno_mode_ok
16856 #undef TARGET_MODES_TIEABLE_P
16857 #define TARGET_MODES_TIEABLE_P s390_modes_tieable_p
16859 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
16860 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
16861 s390_hard_regno_call_part_clobbered
16863 #undef TARGET_ATTRIBUTE_TABLE
16864 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
16866 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
16867 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
16869 #undef TARGET_SET_UP_BY_PROLOGUE
16870 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
16872 #undef TARGET_EXTRA_LIVE_ON_ENTRY
16873 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
16875 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
16876 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
16877 s390_use_by_pieces_infrastructure_p
16879 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
16880 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
16882 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
16883 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
16885 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
16886 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
16888 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
16889 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
16891 #undef TARGET_VECTOR_ALIGNMENT
16892 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
16894 #undef TARGET_INVALID_BINARY_OP
16895 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
16897 #ifdef HAVE_AS_MACHINE_MACHINEMODE
16898 #undef TARGET_ASM_FILE_START
16899 #define TARGET_ASM_FILE_START s390_asm_file_start
16900 #endif
16902 #undef TARGET_ASM_FILE_END
16903 #define TARGET_ASM_FILE_END s390_asm_file_end
16905 #if S390_USE_TARGET_ATTRIBUTE
16906 #undef TARGET_SET_CURRENT_FUNCTION
16907 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
16909 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
16910 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
16912 #undef TARGET_CAN_INLINE_P
16913 #define TARGET_CAN_INLINE_P s390_can_inline_p
16914 #endif
16916 #undef TARGET_OPTION_RESTORE
16917 #define TARGET_OPTION_RESTORE s390_function_specific_restore
16919 #undef TARGET_CAN_CHANGE_MODE_CLASS
16920 #define TARGET_CAN_CHANGE_MODE_CLASS s390_can_change_mode_class
16922 #undef TARGET_CONSTANT_ALIGNMENT
16923 #define TARGET_CONSTANT_ALIGNMENT s390_constant_alignment
16925 #undef TARGET_ASM_CODE_END
16926 #define TARGET_ASM_CODE_END s390_code_end
16928 #undef TARGET_CASE_VALUES_THRESHOLD
16929 #define TARGET_CASE_VALUES_THRESHOLD s390_case_values_threshold
16931 struct gcc_target targetm = TARGET_INITIALIZER;
16933 #include "gt-s390.h"