/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "basic-block.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "sched-int.h"
#include "diagnostic.h"
#include "tree-pass.h"
#include "tree-flow.h"

static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
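
/* For example, MODE_INDEX (SImode) is 2, selecting the "SI" slot of the
   five-entry mult and divide arrays in the cost tables below; any other
   mode falls through to index 4, the "other" slot.  */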

/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall, false}}}
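
/* Each memcpy/memset entry in the cost tables below is a pair of
   stringop_algs, one for the 32-bit and one for the 64-bit target.  A
   stringop_algs value gives the algorithm for blocks of unknown size,
   followed by {max, alg, noalign} triples covering increasing known
   sizes; max == -1 terminates the list.  DUMMY_STRINGOP_ALGS fills the
   slot for a target width the tuning does not provide a strategy for.  */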

static const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),            /* cost of an add instruction */
  COSTS_N_BYTES (3),            /* cost of a lea instruction */
  COSTS_N_BYTES (2),            /* variable shift costs */
  COSTS_N_BYTES (3),            /* constant shift costs */
  {COSTS_N_BYTES (3),           /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),           /* HI */
   COSTS_N_BYTES (3),           /* SI */
   COSTS_N_BYTES (3),           /* DI */
   COSTS_N_BYTES (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),           /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),           /* HI */
   COSTS_N_BYTES (3),           /* SI */
   COSTS_N_BYTES (3),           /* DI */
   COSTS_N_BYTES (5)},          /* other */
  COSTS_N_BYTES (3),            /* cost of movsx */
  COSTS_N_BYTES (3),            /* cost of movzx */
  0,                            /* "large" insn */
  2,                            /* cost for loading QImode using movzbl */
  {2, 2, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 2, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 2},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {2, 2, 2},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  3,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {3, 3},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  3,                            /* cost of moving SSE register */
  {3, 3, 3},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {3, 3, 3},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  0,                            /* size of l1 cache */
  0,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  COSTS_N_BYTES (2),            /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),            /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),            /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),            /* cost of FABS instruction.  */
  COSTS_N_BYTES (2),            /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),            /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  1,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  1,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {  /* 386 specific costs */
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (3),            /* variable shift costs */
  COSTS_N_INSNS (2),            /* constant shift costs */
  {COSTS_N_INSNS (6),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),           /* HI */
   COSTS_N_INSNS (6),           /* SI */
   COSTS_N_INSNS (6),           /* DI */
   COSTS_N_INSNS (6)},          /* other */
  COSTS_N_INSNS (1),            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),          /* HI */
   COSTS_N_INSNS (23),          /* SI */
   COSTS_N_INSNS (23),          /* DI */
   COSTS_N_INSNS (23)},         /* other */
  COSTS_N_INSNS (3),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  15,                           /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {2, 4, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 4, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {8, 8, 8},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {8, 8, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 8, 16},                   /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 8, 16},                   /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  0,                            /* size of l1 cache */
  0,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  COSTS_N_INSNS (23),           /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),           /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),           /* cost of FABS instruction.  */
  COSTS_N_INSNS (24),           /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),          /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {  /* 486 specific costs */
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (3),            /* variable shift costs */
  COSTS_N_INSNS (2),            /* constant shift costs */
  {COSTS_N_INSNS (12),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),          /* HI */
   COSTS_N_INSNS (12),          /* SI */
   COSTS_N_INSNS (12),          /* DI */
   COSTS_N_INSNS (12)},         /* other */
  1,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),          /* HI */
   COSTS_N_INSNS (40),          /* SI */
   COSTS_N_INSNS (40),          /* DI */
   COSTS_N_INSNS (40)},         /* other */
  COSTS_N_INSNS (3),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  15,                           /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {2, 4, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 4, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {8, 8, 8},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {8, 8, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 8, 16},                   /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 8, 16},                   /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  4,                            /* size of l1 cache.  486 has 8kB cache
                                   shared for code and data, so 4kB is
                                   not really precise.  */
  4,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  COSTS_N_INSNS (8),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),           /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),           /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (4),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (11),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),          /* HI */
   COSTS_N_INSNS (11),          /* SI */
   COSTS_N_INSNS (11),          /* DI */
   COSTS_N_INSNS (11)},         /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),          /* HI */
   COSTS_N_INSNS (25),          /* SI */
   COSTS_N_INSNS (25),          /* DI */
   COSTS_N_INSNS (25)},         /* other */
  COSTS_N_INSNS (3),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  8,                            /* "large" insn */
  6,                            /* cost for loading QImode using movzbl */
  {2, 4, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 4, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  8,                            /* cost of moving MMX register */
  {8, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {8, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 8, 16},                   /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 8, 16},                   /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  8,                            /* size of l1 cache.  */
  8,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  COSTS_N_INSNS (3),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),           /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (4),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (4),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (4)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),          /* HI */
   COSTS_N_INSNS (17),          /* SI */
   COSTS_N_INSNS (17),          /* DI */
   COSTS_N_INSNS (17)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  2,                            /* cost for loading QImode using movzbl */
  {4, 4, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 2, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {2, 2},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {2, 2},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {2, 2, 8},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {2, 2, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  8,                            /* size of l1 cache.  */
  256,                          /* size of l2 cache */
  32,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  COSTS_N_INSNS (3),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),           /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks inline loop is still a
     noticeable win, for bigger blocks either rep movsl or rep movsb is
     way to go.  Rep movsb has apparently more expensive startup time in CPU,
     but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop, false}, {1024, unrolled_loop, false},
                        {8192, rep_prefix_4_byte, false},
                        {-1, rep_prefix_1_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop, false},
                        {8192, rep_prefix_4_byte, false},
                        {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};
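
/* Reading the PentiumPro memcpy entry above: blocks of unknown size use
   rep movsl; known sizes use an inline loop up to 128 bytes, an unrolled
   loop up to 1024, rep movsl up to 8192, and rep movsb beyond that (the
   max value of -1 marks the final catch-all range).  */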

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (2),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (7),           /* SI */
   COSTS_N_INSNS (7),           /* DI */
   COSTS_N_INSNS (7)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),          /* HI */
   COSTS_N_INSNS (39),          /* SI */
   COSTS_N_INSNS (39),          /* DI */
   COSTS_N_INSNS (39)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  1,                            /* cost for loading QImode using movzbl */
  {1, 1, 1},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {1, 1, 1},                    /* cost of storing integer registers */
  1,                            /* cost of reg,reg fld/fst */
  {1, 1, 1},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 6, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  1,                            /* cost of moving MMX register */
  {1, 1},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {1, 1},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  1,                            /* cost of moving SSE register */
  {1, 1, 1},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {1, 1, 1},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  1,                            /* MMX or SSE register to integer */
  64,                           /* size of l1 cache.  */
  128,                          /* size of l2 cache.  */
  32,                           /* size of prefetch block */
  1,                            /* number of parallel prefetches */
  COSTS_N_INSNS (6),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),           /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),           /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (3),           /* DI */
   COSTS_N_INSNS (3)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),          /* HI */
   COSTS_N_INSNS (18),          /* SI */
   COSTS_N_INSNS (18),          /* DI */
   COSTS_N_INSNS (18)},         /* other */
  COSTS_N_INSNS (2),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  8,                            /* "large" insn */
  3,                            /* cost for loading QImode using movzbl */
  {4, 5, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 3, 2},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {6, 6, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 4},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {2, 2},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {2, 2},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {2, 2, 8},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {2, 2, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  6,                            /* MMX or SSE register to integer */
  32,                           /* size of l1 cache.  */
  32,                           /* size of l2 cache.  Some models
                                   have integrated l2 cache, but
                                   optimizing for k6 is not important
                                   enough to worry about that.  */
  32,                           /* size of prefetch block */
  1,                            /* number of parallel prefetches */
  COSTS_N_INSNS (2),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),           /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (5),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),           /* HI */
   COSTS_N_INSNS (5),           /* SI */
   COSTS_N_INSNS (5),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),          /* HI */
   COSTS_N_INSNS (42),          /* SI */
   COSTS_N_INSNS (74),          /* DI */
   COSTS_N_INSNS (74)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 4},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 6},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  5,                            /* MMX or SSE register to integer */
  64,                           /* size of l1 cache.  */
  256,                          /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),          /* HI */
   COSTS_N_INSNS (42),          /* SI */
   COSTS_N_INSNS (74),          /* DI */
   COSTS_N_INSNS (74)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 3, 6},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  5,                            /* MMX or SSE register to integer */
  64,                           /* size of l1 cache.  */
  512,                          /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,                          /* number of parallel prefetches */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
              {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false},
              {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
  4,                            /* scalar_stmt_cost.  */
  2,                            /* scalar load_cost.  */
  2,                            /* scalar_store_cost.  */
  5,                            /* vec_stmt_cost.  */
  0,                            /* vec_to_scalar_cost.  */
  2,                            /* scalar_to_vec_cost.  */
  2,                            /* vec_align_load_cost.  */
  3,                            /* vec_unalign_load_cost.  */
  3,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  2,                            /* cond_not_taken_branch_cost.  */
};
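
/* k8_cost is the first table above to supply a real 64-bit variant (the
   second stringop_algs of each pair) instead of DUMMY_STRINGOP_ALGS:
   e.g. a 64-bit memcpy uses an inline loop up to 16 bytes, rep movsq up
   to 8192 bytes, and a libcall beyond that.  */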

static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),          /* HI */
   COSTS_N_INSNS (51),          /* SI */
   COSTS_N_INSNS (83),          /* DI */
   COSTS_N_INSNS (83)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 3},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
                                /* On K8:
                                     MOVD reg64, xmmreg Double FSTORE 4
                                     MOVD reg32, xmmreg Double FSTORE 4
                                   On AMDFAM10:
                                     MOVD reg64, xmmreg Double FADD 3
                                     MOVD reg32, xmmreg Double FADD 3 */
  64,                           /* size of l1 cache.  */
  512,                          /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,                          /* number of parallel prefetches */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction.  */
  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
              {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  4,                            /* scalar_stmt_cost.  */
  2,                            /* scalar load_cost.  */
  2,                            /* scalar_store_cost.  */
  6,                            /* vec_stmt_cost.  */
  0,                            /* vec_to_scalar_cost.  */
  2,                            /* scalar_to_vec_cost.  */
  2,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  2,                            /* vec_store_cost.  */
  2,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (4),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (4),           /* SI */
   COSTS_N_INSNS (6),           /* DI */
   COSTS_N_INSNS (6)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),          /* HI */
   COSTS_N_INSNS (51),          /* SI */
   COSTS_N_INSNS (83),          /* DI */
   COSTS_N_INSNS (83)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {5, 5, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {5, 5, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 4},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 4},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 4},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  2,                            /* MMX or SSE register to integer */
                                /* On K8:
                                     MOVD reg64, xmmreg Double FSTORE 4
                                     MOVD reg32, xmmreg Double FSTORE 4
                                   On AMDFAM10:
                                     MOVD reg64, xmmreg Double FADD 3
                                     MOVD reg32, xmmreg Double FADD 3 */
  16,                           /* size of l1 cache.  */
  2048,                         /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,                          /* number of parallel prefetches */
  COSTS_N_INSNS (6),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),           /* cost of FSQRT instruction.  */
  /* BDVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
              {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  6,                            /* scalar_stmt_cost.  */
  4,                            /* scalar load_cost.  */
  4,                            /* scalar_store_cost.  */
  6,                            /* vec_stmt_cost.  */
  0,                            /* vec_to_scalar_cost.  */
  2,                            /* scalar_to_vec_cost.  */
  4,                            /* vec_align_load_cost.  */
  4,                            /* vec_unalign_load_cost.  */
  4,                            /* vec_store_cost.  */
  2,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs bdver2_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (4),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (4),           /* SI */
   COSTS_N_INSNS (6),           /* DI */
   COSTS_N_INSNS (6)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),          /* HI */
   COSTS_N_INSNS (51),          /* SI */
   COSTS_N_INSNS (83),          /* DI */
   COSTS_N_INSNS (83)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {5, 5, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {5, 5, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 4},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 4},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 4},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  2,                            /* MMX or SSE register to integer */
                                /* On K8:
                                     MOVD reg64, xmmreg Double FSTORE 4
                                     MOVD reg32, xmmreg Double FSTORE 4
                                   On AMDFAM10:
                                     MOVD reg64, xmmreg Double FADD 3
                                     MOVD reg32, xmmreg Double FADD 3 */
  16,                           /* size of l1 cache.  */
  2048,                         /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,                          /* number of parallel prefetches */
  COSTS_N_INSNS (6),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),           /* cost of FSQRT instruction.  */
  /* BDVER2 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
              {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  6,                            /* scalar_stmt_cost.  */
  4,                            /* scalar load_cost.  */
  4,                            /* scalar_store_cost.  */
  6,                            /* vec_stmt_cost.  */
  0,                            /* vec_to_scalar_cost.  */
  2,                            /* scalar_to_vec_cost.  */
  4,                            /* vec_align_load_cost.  */
  4,                            /* vec_unalign_load_cost.  */
  4,                            /* vec_store_cost.  */
  2,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs bdver3_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (4),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (4),           /* SI */
   COSTS_N_INSNS (6),           /* DI */
   COSTS_N_INSNS (6)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),          /* HI */
   COSTS_N_INSNS (51),          /* SI */
   COSTS_N_INSNS (83),          /* DI */
   COSTS_N_INSNS (83)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {5, 5, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {5, 5, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 4},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 4},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 4},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  2,                            /* MMX or SSE register to integer */
  16,                           /* size of l1 cache.  */
  2048,                         /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,                          /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (6),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),           /* cost of FSQRT instruction.  */
  /* BDVER3 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
              {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  6,                            /* scalar_stmt_cost.  */
  4,                            /* scalar load_cost.  */
  4,                            /* scalar_store_cost.  */
  6,                            /* vec_stmt_cost.  */
  0,                            /* vec_to_scalar_cost.  */
  2,                            /* scalar_to_vec_cost.  */
  4,                            /* vec_align_load_cost.  */
  4,                            /* vec_unalign_load_cost.  */
  4,                            /* vec_store_cost.  */
  2,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs btver1_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),          /* HI */
   COSTS_N_INSNS (51),          /* SI */
   COSTS_N_INSNS (83),          /* DI */
   COSTS_N_INSNS (83)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 3},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
                                /* On K8:
                                     MOVD reg64, xmmreg Double FSTORE 4
                                     MOVD reg32, xmmreg Double FSTORE 4
                                   On AMDFAM10:
                                     MOVD reg64, xmmreg Double FADD 3
                                     MOVD reg32, xmmreg Double FADD 3 */
  32,                           /* size of l1 cache.  */
  512,                          /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  100,                          /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction.  */
  /* BTVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
              {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  4,                            /* scalar_stmt_cost.  */
  2,                            /* scalar load_cost.  */
  2,                            /* scalar_store_cost.  */
  6,                            /* vec_stmt_cost.  */
  0,                            /* vec_to_scalar_cost.  */
  2,                            /* scalar_to_vec_cost.  */
  2,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  2,                            /* vec_store_cost.  */
  2,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs btver2_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),          /* HI */
   COSTS_N_INSNS (51),          /* SI */
   COSTS_N_INSNS (83),          /* DI */
   COSTS_N_INSNS (83)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 3},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
                                /* On K8:
                                     MOVD reg64, xmmreg Double FSTORE 4
                                     MOVD reg32, xmmreg Double FSTORE 4
                                   On AMDFAM10:
                                     MOVD reg64, xmmreg Double FADD 3
                                     MOVD reg32, xmmreg Double FADD 3 */
  32,                           /* size of l1 cache.  */
  2048,                         /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  100,                          /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
              {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
              {-1, libcall, false}}}},
  4,                            /* scalar_stmt_cost.  */
  2,                            /* scalar load_cost.  */
  2,                            /* scalar_store_cost.  */
  6,                            /* vec_stmt_cost.  */
  0,                            /* vec_to_scalar_cost.  */
  2,                            /* scalar_to_vec_cost.  */
  2,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  2,                            /* vec_store_cost.  */
  2,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (3),            /* cost of a lea instruction */
  COSTS_N_INSNS (4),            /* variable shift costs */
  COSTS_N_INSNS (4),            /* constant shift costs */
  {COSTS_N_INSNS (15),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),          /* HI */
   COSTS_N_INSNS (15),          /* SI */
   COSTS_N_INSNS (15),          /* DI */
   COSTS_N_INSNS (15)},         /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),          /* HI */
   COSTS_N_INSNS (56),          /* SI */
   COSTS_N_INSNS (56),          /* DI */
   COSTS_N_INSNS (56)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  16,                           /* "large" insn */
  2,                            /* cost for loading QImode using movzbl */
  {4, 5, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 3, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {2, 2},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {2, 2},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  12,                           /* cost of moving SSE register */
  {12, 12, 12},                 /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {2, 2, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  10,                           /* MMX or SSE register to integer */
  8,                            /* size of l1 cache.  */
  256,                          /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (5),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),           /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte, false}, {-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte, false}, {48, loop, false},
              {20480, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (10),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),          /* HI */
   COSTS_N_INSNS (10),          /* SI */
   COSTS_N_INSNS (10),          /* DI */
   COSTS_N_INSNS (10)},         /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),          /* HI */
   COSTS_N_INSNS (66),          /* SI */
   COSTS_N_INSNS (66),          /* DI */
   COSTS_N_INSNS (66)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  16,                           /* "large" insn */
  17,                           /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {4, 4, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  3,                            /* cost of reg,reg fld/fst */
  {12, 12, 12},                 /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 4},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  6,                            /* cost of moving MMX register */
  {12, 12},                     /* cost of loading MMX registers
                                   in SImode and DImode */
  {12, 12},                     /* cost of storing MMX registers
                                   in SImode and DImode */
  6,                            /* cost of moving SSE register */
  {12, 12, 12},                 /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {12, 12, 12},                 /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  8,                            /* MMX or SSE register to integer */
  8,                            /* size of l1 cache.  */
  1024,                         /* size of l2 cache.  */
  128,                          /* size of prefetch block */
  8,                            /* number of parallel prefetches */
  1,                            /* Branch cost */
  COSTS_N_INSNS (6),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),           /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte, false}, {-1, rep_prefix_4_byte, false}}},
   {libcall, {{32, loop, false}, {20000, rep_prefix_8_byte, false},
              {100000, unrolled_loop, false}, {-1, libcall, false}}}},
  {{libcall, {{6, loop_1_byte, false}, {48, loop, false},
              {20480, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{24, loop, false}, {64, unrolled_loop, false},
              {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,        /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (2)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),          /* HI */
   COSTS_N_INSNS (42),          /* SI */
   COSTS_N_INSNS (74),          /* DI */
   COSTS_N_INSNS (74)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  17,                           /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {4, 4, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {12, 12, 12},                 /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {8, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {8, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {8, 8, 8},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {8, 8, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  5,                            /* MMX or SSE register to integer */
  32,                           /* size of l1 cache.  */
  256,                          /* size of l2 cache.  */
  64,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  3,                            /* Branch cost */
  COSTS_N_INSNS (8),            /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),            /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),           /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),            /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),            /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),           /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop, false}, {-1, rep_prefix_4_byte, false}}},
   {libcall, {{32, loop, false}, {64, rep_prefix_4_byte, false},
              {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {15, unrolled_loop, false},
              {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{24, loop, false}, {32, unrolled_loop, false},
              {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
  1,                            /* scalar_stmt_cost.  */
  1,                            /* scalar load_cost.  */
  1,                            /* scalar_store_cost.  */
  1,                            /* vec_stmt_cost.  */
  1,                            /* vec_to_scalar_cost.  */
  1,                            /* scalar_to_vec_cost.  */
  1,                            /* vec_align_load_cost.  */
  2,                            /* vec_unalign_load_cost.  */
  1,                            /* vec_store_cost.  */
  3,                            /* cond_taken_branch_cost.  */
  1,                            /* cond_not_taken_branch_cost.  */
};
/* Generic64 should produce code tuned for Nocona and K8.  */

struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles and more.  With
     this cost however our current implementation of synth_mult results in
     use of unnecessary temporary registers causing regression on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* core_cost should produce code tuned for the Core family of CPUs.  */

struct processor_costs core_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles and more.  With
     this cost however our current implementation of synth_mult results in
     use of unnecessary temporary registers causing regression on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  /* FIXME perhaps more appropriate value is 5.  */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{1024, rep_prefix_4_byte, true}, {-1, libcall, false}}},
   {libcall, {{24, loop, true}, {128, rep_prefix_8_byte, true},
	      {-1, libcall, false}}}},
  {{libcall, {{6, loop_1_byte, true},
	      {8192, rep_prefix_4_byte, true},
	      {-1, libcall, false}}},
   {libcall, {{24, loop, true}, {512, rep_prefix_8_byte, true},
	      {-1, libcall, false}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
   Athlon and K8.  */

struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop, false}, {8192, rep_prefix_4_byte, false},
	      {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop, false}, {8192, rep_prefix_4_byte, false},
	      {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* Set by -mtune.  */
const struct processor_costs *ix86_tune_cost = &pentium_cost;

/* Set by -mtune or -Os.  */
const struct processor_costs *ix86_cost = &pentium_cost;
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_P4_NOCONA (m_PENT4 | m_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_COREI7 (1<<PROCESSOR_COREI7)
#define m_HASWELL (1<<PROCESSOR_HASWELL)
#define m_CORE_ALL (m_CORE2 | m_COREI7 | m_HASWELL)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_BDVER1 (1<<PROCESSOR_BDVER1)
#define m_BDVER2 (1<<PROCESSOR_BDVER2)
#define m_BDVER3 (1<<PROCESSOR_BDVER3)
#define m_BTVER1 (1<<PROCESSOR_BTVER1)
#define m_BTVER2 (1<<PROCESSOR_BTVER2)
#define m_BDVER (m_BDVER1 | m_BDVER2 | m_BDVER3)
#define m_BTVER (m_BTVER1 | m_BTVER2)
#define m_AMD_MULTIPLE (m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
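/* Each m_* mask has one bit per processor (1 << PROCESSOR_*), so set
   membership reduces to a single AND; e.g. (1<<PROCESSOR_CORE2) & m_CORE_ALL
   is nonzero exactly because m_CORE_ALL includes m_CORE2.  */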
/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling for Generic64 seems like good code size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro base chips.  */
  m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_PPRO | m_ATOM | m_CORE_ALL | m_K6 | m_AMD_MULTIPLE
  | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation results.  But after P4 was released, no performance
     benefit was observed with branch hints; they also increase code size.
     As a result, icc never generates branch hints.  */

  /* X86_TUNE_DOUBLE_WITH_ADD */

  /* X86_TUNE_USE_SAHF */
  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_K8
  | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_GEODE | m_AMD_MULTIPLE
  | m_GENERIC,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on Generic32 compilation setting as well.  However
     in the current implementation the partial register stalls are not
     eliminated very well - they can be introduced via subregs synthesized
     by combine and can happen in caller/callee saving sequences.  Because
     this option pays back little on PPro based chips and is in conflict
     with partial reg dependencies used by Athlon/P4 based chips, it is
     better to leave it off for generic32 for now.  */

  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE_ALL | m_GENERIC,

  /* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall
     on 16-bit immediate moves into memory on Core2 and Corei7.  */
  m_CORE_ALL | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_ATOM | m_K6),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */

  /* X86_TUNE_SPLIT_LONG_MOVES */

  /* X86_TUNE_READ_MODIFY_WRITE */

  /* X86_TUNE_READ_MODIFY */

  /* X86_TUNE_PROMOTE_QIMODE */
  m_386 | m_486 | m_PENT | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE
  | m_GENERIC,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_386 | m_486 | m_PENT),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_P4_NOCONA,

  /* X86_TUNE_QIMODE_MATH */

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls was more effective.  */

  /* X86_TUNE_PROMOTE_QI_REGS */

  /* X86_TUNE_PROMOTE_HI_REGS */

  /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
     over esp addition.  */
  m_386 | m_486 | m_PENT | m_PPRO,

  /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
     over esp addition.  */

  /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
     over esp subtraction.  */
  m_386 | m_486 | m_PENT | m_K6_GEODE,

  /* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
     over esp subtraction.  */
  m_PENT | m_K6_GEODE,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies */
  ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_GEODE | m_AMD_MULTIPLE
    | m_GENERIC),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,

  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here in between PPro/Pentium4 based chips that treat 128bit
     SSE registers as single units versus K8 based chips that divide SSE
     registers to two 64bit halves.  This knob promotes all store destinations
     to be 128bit to allow register renaming on 128bit SSE units, but usually
     results in one extra microop on 64bit SSE units.  Experimental results
     show that disabling this option on P4 brings over 20% SPECfp regression,
     while enabling it on K8 brings roughly 2.4% regression that can be partly
     masked by careful scheduling of moves.  */
  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMDFAM10 | m_BDVER
  | m_GENERIC,

  /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
  m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER,

  /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */

  /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
     are resolved on SSE register parts instead of whole registers, so we may
     maintain just lower part of scalar values in proper format leaving the
     upper part undefined.  */

  /* X86_TUNE_SSE_TYPELESS_STORES */

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_P4_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_PPRO | m_ATHLON_K8,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_PPRO | m_ATHLON_K8,

  /* X86_TUNE_SHIFT1 */

  /* X86_TUNE_USE_FFREEP */

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */
  ~(m_AMDFAM10 | m_BDVER),

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE
  | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_GENERIC),

  /* X86_TUNE_PAD_RETURNS */
  m_CORE_ALL | m_AMD_MULTIPLE | m_GENERIC,

  /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short function.  */

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_ATHLON_K8
  | m_GENERIC,

  /* X86_TUNE_AVOID_VECTOR_DECODE */
  m_CORE_ALL | m_K8 | m_GENERIC64,

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but 386 and 486 do HImode multiply
     faster.  */

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
     vector path on AMD machines.  */
  m_CORE_ALL | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC64,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
     machines.  */
  m_CORE_ALL | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC64,

  /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
     than a MOV.  */

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but one byte longer.  */

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
     operand that cannot be represented using a modRM byte.  The XOR
     replacement is long decoded, so this split helps here as well.  */

  /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
     from FP to FP.  */
  m_CORE_ALL | m_AMDFAM10 | m_GENERIC,

  /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
     from integer to FP.  */

  /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
     with a subsequent conditional jump instruction into a single
     compare-and-branch uop.  */

  /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit.  This flag
     will impact LEA instruction selection.  */

  /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
     manipulation.  */

  /* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching
     at -O3.  For the moment, the prefetching seems badly tuned for Intel
     chips.  */
  m_K6_GEODE | m_AMD_MULTIPLE,

  /* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
     the auto-vectorizer.  */

  /* X86_TUNE_REASSOC_INT_TO_PARALLEL: Try to produce parallel computations
     during reassociation of integer computation.  */

  /* X86_TUNE_REASSOC_FP_TO_PARALLEL: Try to produce parallel computations
     during reassociation of fp computation.  */

  /* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE
     regs instead of memory.  */

  /* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for
     a conditional move.  */
};
/* Feature tests against the various architecture variations.  */
unsigned char ix86_arch_features[X86_ARCH_LAST];

/* Feature tests against the various architecture variations, used to create
   ix86_arch_features based on the processor mask.  */
static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
  /* X86_ARCH_CMOV: Conditional move was added for pentiumpro.  */
  ~(m_386 | m_486 | m_PENT | m_K6),

  /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486.  */

  /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium.  */

  /* X86_ARCH_XADD: Exchange and add was added for 80486.  */

  /* X86_ARCH_BSWAP: Byteswap was added for 80486.  */
};
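/* Sketch of how the initial_* tables above are expected to be folded into
   the per-feature arrays once ix86_tune/ix86_arch are known; the real code
   lives in ix86_option_override_internal, so the exact shape here is an
   assumption for illustration.  */
#if 0
  ix86_tune_mask = 1u << ix86_tune;
  for (i = 0; i < X86_TUNE_LAST; ++i)
    ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

  ix86_arch_mask = 1u << ix86_arch;
  for (i = 0; i < X86_ARCH_LAST; ++i)
    ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
#endif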
static const unsigned int x86_accumulate_outgoing_args
  = m_PPRO | m_P4_NOCONA | m_ATOM | m_CORE_ALL | m_AMD_MULTIPLE | m_GENERIC;

static const unsigned int x86_arch_always_fancy_math_387
  = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE
    | m_GENERIC;

static const unsigned int x86_avx256_split_unaligned_load
  = m_COREI7 | m_GENERIC;

static const unsigned int x86_avx256_split_unaligned_store
  = m_COREI7 | m_BDVER | m_GENERIC;
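/* Illustrative only (assumed shape, following the same mask convention):
   during option override these sets are checked against the selected tuning
   to enable the matching -mavx256-split-unaligned-* flags by default.  */
#if 0
  if (x86_avx256_split_unaligned_load & ix86_tune_mask)
    target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
  if (x86_avx256_split_unaligned_store & ix86_tune_mask)
    target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
#endif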
/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20

/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* flags, fpsr, fpcr, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  /* SSE REX registers */
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
};
/* The "default" register map used in 32bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,		/* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
/* The "default" register map used in 64bit mode.  */

int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,		/* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  17, 18, 19, 20, 21, 22, 23, 24,	/* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,	/* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,		/* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,	/* extended SSE registers */
};
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8  for %eip    (no gcc equivalent)
	9  for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.

   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to understand
   that it should say that a variable lives in %st(0) (when asked via an
   `=' command) if we said it was in DWARF regno 11, but SDB still prints
   garbage when asked for the value of the variable in question (via a
   `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)

   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.

   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
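/* Reading the maps: index by gcc regno, get the debugger register number.
   For example, per the SVR4 numbering documented above:  */
#if 0
  svr4_dbx_register_map[4] == 6;   /* %esi: gcc regno 4 -> DWARF 6.  */
  svr4_dbx_register_map[6] == 5;   /* %ebp: gcc regno 6 -> DWARF 5.  */
  svr4_dbx_register_map[8] == 11;  /* %st(0): gcc regno 8 -> DWARF 11.  */
#endif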
/* Define parameter passing and return registers.  */

static int const x86_64_int_parameter_registers[6] =
{
  DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
};

static int const x86_64_ms_abi_int_parameter_registers[4] =
{
  CX_REG, DX_REG, R8_REG, R9_REG
};

static int const x86_64_int_return_registers[4] =
{
  AX_REG, DX_REG, DI_REG, SI_REG
};
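/* Consequence of the tables above (illustrative): for a call f (a, b, c),
   the SysV ABI passes a/b/c in %rdi/%rsi/%rdx while the MS ABI uses
   %rcx/%rdx/%r8, so the same source compiled for the two ABIs reads its
   integer arguments from different registers.  */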
/* Define the structure for the machine field in struct function.  */

struct GTY(()) stack_local_entry {
  unsigned short mode;
  struct stack_local_entry *next;
};

/* Structure describing stack frame layout.
   Stack grows downward:

   saved static chain			if ix86_static_chain_on_stack

   saved frame pointer			if frame_pointer_needed
					<- HARD_FRAME_POINTER

					<- sse_regs_save_offset

   [va_arg registers]			|

   [padding2]				| = to_allocate
  */
struct ix86_frame
{
  int outgoing_arguments_size;

  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;
  HOST_WIDE_INT hfp_save_offset;
  HOST_WIDE_INT reg_save_offset;
  HOST_WIDE_INT sse_reg_save_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};
/* Which cpu are we scheduling for.  */
enum attr_cpu ix86_schedule;

/* Which cpu are we optimizing for.  */
enum processor_type ix86_tune;

/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if processor has SSE prefetch instruction.  */
unsigned char x86_prefetch_sse;

/* -mstackrealign option */
static const char ix86_force_align_arg_pointer_string[]
  = "force_align_arg_pointer";
static rtx (*ix86_gen_leave) (void);
static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
static rtx (*ix86_gen_tls_global_dynamic_64) (rtx, rtx, rtx);
static rtx (*ix86_gen_tls_local_dynamic_base_64) (rtx, rtx);
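/* These hooks let word-size-independent code emit the right pattern.  A
   minimal sketch of how they are expected to be initialized during option
   override (assumed shape; the gen_* names come from the generated
   insn-emit code):  */
#if 0
  if (TARGET_64BIT)
    {
      ix86_gen_leave = gen_leave_rex64;
      ix86_gen_add3 = gen_adddi3;
      ix86_gen_sub3 = gen_subdi3;
    }
  else
    {
      ix86_gen_leave = gen_leave;
      ix86_gen_add3 = gen_addsi3;
      ix86_gen_sub3 = gen_subsi3;
    }
#endif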
/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Alignment for incoming stack boundary in bits specified at
   command line.  */
static unsigned int ix86_user_incoming_stack_boundary;

/* Default alignment for incoming stack boundary in bits.  */
static unsigned int ix86_default_incoming_stack_boundary;

/* Alignment for incoming stack boundary in bits.  */
unsigned int ix86_incoming_stack_boundary;

/* Calling abi specific va_list type nodes.  */
static GTY(()) tree sysv_va_list_type_node;
static GTY(()) tree ms_va_list_type_node;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;
/* Fence to use after loop using movnt.  */

/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of SSESF, SSEDF classes, which are basically SSE class; gcc will just
   use SF or DFmode moves instead of DImode to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (upper half does contain padding).  */
enum x86_64_reg_class
  {
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_COMPLEX_X87_CLASS
  };

#define MAX_CLASSES 4
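/* Illustrative only (SysV x86-64 ABI): arguments are classified one class
   per eightbyte, so a struct { long l; double d; } classifies as
   { X86_64_INTEGER_CLASS, X86_64_SSE_CLASS } and is passed with L in a
   general register and D in an SSE register (X86_64_SSE_CLASS is among the
   enumerators elided above).  MAX_CLASSES bounds the per-argument
   classification array; e.g. a 32-byte __m256 spans four eightbytes.  */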
/* Table of constants used by fldpi, fldln2, etc....  */
static REAL_VALUE_TYPE ext_80387_constants_table[5];
static bool ext_80387_constants_init = 0;
static struct machine_function * ix86_init_machine_status (void);
static rtx ix86_function_value (const_tree, const_tree, bool);
static bool ix86_function_value_regno_p (const unsigned int);
static unsigned int ix86_function_arg_boundary (enum machine_mode,
						const_tree);
static rtx ix86_static_chain (const_tree, bool);
static int ix86_function_regparm (const_tree, const_tree);
static void ix86_compute_frame_layout (struct ix86_frame *);
static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
						 rtx, rtx, int);
static void ix86_add_new_builtins (HOST_WIDE_INT);
static tree ix86_canonical_va_list_type (tree);
static void predict_jump (int);
static unsigned int split_stack_prologue_scratch_regno (void);
static bool i386_asm_output_addr_const_extra (FILE *, rtx);
enum ix86_function_specific_strings
{
  IX86_FUNCTION_SPECIFIC_ARCH,
  IX86_FUNCTION_SPECIFIC_TUNE,
  IX86_FUNCTION_SPECIFIC_MAX
};
static char *ix86_target_string (HOST_WIDE_INT, int, const char *,
				 const char *, enum fpmath_unit, bool);
static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
static void ix86_function_specific_save (struct cl_target_option *);
static void ix86_function_specific_restore (struct cl_target_option *);
static void ix86_function_specific_print (FILE *, int,
					  struct cl_target_option *);
static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
static bool ix86_valid_target_attribute_inner_p (tree, char *[],
						 struct gcc_options *);
static bool ix86_can_inline_p (tree, tree);
static void ix86_set_current_function (tree);
static unsigned int ix86_minimum_incoming_stack_boundary (bool);

static enum calling_abi ix86_function_abi (const_tree);
#ifndef SUBTARGET32_DEFAULT_CPU
#define SUBTARGET32_DEFAULT_CPU "i386"
#endif

/* Whether -mtune= or -march= were specified */
static int ix86_tune_defaulted;
static int ix86_arch_specified;
/* Vectorization library interface and handlers.  */
static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);

static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
/* Processor target table, indexed by processor number */
struct ptt
{
  const struct processor_costs *cost;	/* Processor costs */
  const int align_loop;			/* Default alignments.  */
  const int align_loop_max_skip;
  const int align_jump;
  const int align_jump_max_skip;
  const int align_func;
};

static const struct ptt processor_target_table[PROCESSOR_max] =
{
  {&i386_cost, 4, 3, 4, 3, 4},
  {&i486_cost, 16, 15, 16, 15, 16},
  {&pentium_cost, 16, 7, 16, 7, 16},
  {&pentiumpro_cost, 16, 15, 16, 10, 16},
  {&geode_cost, 0, 0, 0, 0, 0},
  {&k6_cost, 32, 7, 32, 7, 32},
  {&athlon_cost, 16, 7, 16, 7, 16},
  {&pentium4_cost, 0, 0, 0, 0, 0},
  {&k8_cost, 16, 7, 16, 7, 16},
  {&nocona_cost, 0, 0, 0, 0, 0},
  {&core_cost, 16, 10, 16, 10, 16},
  {&core_cost, 16, 10, 16, 10, 16},
  {&core_cost, 16, 10, 16, 10, 16},
  {&generic32_cost, 16, 7, 16, 7, 16},
  {&generic64_cost, 16, 10, 16, 10, 16},
  {&amdfam10_cost, 32, 24, 32, 7, 32},
  {&bdver1_cost, 32, 24, 32, 7, 32},
  {&bdver2_cost, 32, 24, 32, 7, 32},
  {&bdver3_cost, 32, 24, 32, 7, 32},
  {&btver1_cost, 32, 24, 32, 7, 32},
  {&btver2_cost, 32, 24, 32, 7, 32},
  {&atom_cost, 16, 15, 16, 7, 16}
};
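/* Reading one row (illustrative): {&k8_cost, 16, 7, 16, 7, 16} selects the
   K8 cost table and asks for loops and jumps to be aligned to 16 bytes,
   skipping at most 7 bytes of padding, with functions aligned to 16.  */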
static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =

static bool
gate_insert_vzeroupper (void)
{
  return TARGET_VZEROUPPER;
}

static unsigned int
rest_of_handle_insert_vzeroupper (void)
{
  int i;

  /* vzeroupper instructions are inserted immediately after reload to
     account for possible spills from 256bit registers.  The pass
     reuses mode switching infrastructure by re-running mode insertion
     pass, so disable entities that have already been processed.  */
  for (i = 0; i < MAX_386_ENTITIES; i++)
    ix86_optimize_mode_switching[i] = 0;

  ix86_optimize_mode_switching[AVX_U128] = 1;

  /* Call optimize_mode_switching.  */
  pass_mode_switching.pass.execute ();
  return 0;
}
struct rtl_opt_pass pass_insert_vzeroupper =
{
 {
  RTL_PASS,
  "vzeroupper",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_insert_vzeroupper,		/* gate */
  rest_of_handle_insert_vzeroupper,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  0,					/* todo_flags_finish */
 }
};
/* Return true if a red-zone is in use.  */

static inline bool
ix86_using_red_zone (void)
{
  return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
}
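/* The red zone is the 128 bytes below %rsp that the SysV x86-64 ABI
   guarantees leaf code may use without adjusting the stack pointer; the
   MS ABI makes no such guarantee, hence the !TARGET_64BIT_MS_ABI test.  */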
/* Return a string that documents the current -m options.  The caller is
   responsible for freeing the string.  */

static char *
ix86_target_string (HOST_WIDE_INT isa, int flags, const char *arch,
		    const char *tune, enum fpmath_unit fpmath,
		    bool add_nl_p)
{
  struct ix86_target_opts
  {
    const char *option;			/* option string */
    HOST_WIDE_INT mask;			/* isa mask options */
  };
  /* This table is ordered so that options like -msse4.2 that imply
     preceding options will match first.  */
  static struct ix86_target_opts isa_opts[] =
  {
    { "-mfma4",		OPTION_MASK_ISA_FMA4 },
    { "-mfma",		OPTION_MASK_ISA_FMA },
    { "-mxop",		OPTION_MASK_ISA_XOP },
    { "-mlwp",		OPTION_MASK_ISA_LWP },
    { "-msse4a",	OPTION_MASK_ISA_SSE4A },
    { "-msse4.2",	OPTION_MASK_ISA_SSE4_2 },
    { "-msse4.1",	OPTION_MASK_ISA_SSE4_1 },
    { "-mssse3",	OPTION_MASK_ISA_SSSE3 },
    { "-msse3",		OPTION_MASK_ISA_SSE3 },
    { "-msse2",		OPTION_MASK_ISA_SSE2 },
    { "-msse",		OPTION_MASK_ISA_SSE },
    { "-m3dnow",	OPTION_MASK_ISA_3DNOW },
    { "-m3dnowa",	OPTION_MASK_ISA_3DNOW_A },
    { "-mmmx",		OPTION_MASK_ISA_MMX },
    { "-mabm",		OPTION_MASK_ISA_ABM },
    { "-mbmi",		OPTION_MASK_ISA_BMI },
    { "-mbmi2",		OPTION_MASK_ISA_BMI2 },
    { "-mlzcnt",	OPTION_MASK_ISA_LZCNT },
    { "-mhle",		OPTION_MASK_ISA_HLE },
    { "-mfxsr",		OPTION_MASK_ISA_FXSR },
    { "-mrdseed",	OPTION_MASK_ISA_RDSEED },
    { "-mprfchw",	OPTION_MASK_ISA_PRFCHW },
    { "-madx",		OPTION_MASK_ISA_ADX },
    { "-mtbm",		OPTION_MASK_ISA_TBM },
    { "-mpopcnt",	OPTION_MASK_ISA_POPCNT },
    { "-mmovbe",	OPTION_MASK_ISA_MOVBE },
    { "-mcrc32",	OPTION_MASK_ISA_CRC32 },
    { "-maes",		OPTION_MASK_ISA_AES },
    { "-mpclmul",	OPTION_MASK_ISA_PCLMUL },
    { "-mfsgsbase",	OPTION_MASK_ISA_FSGSBASE },
    { "-mrdrnd",	OPTION_MASK_ISA_RDRND },
    { "-mf16c",		OPTION_MASK_ISA_F16C },
    { "-mrtm",		OPTION_MASK_ISA_RTM },
    { "-mxsave",	OPTION_MASK_ISA_XSAVE },
    { "-mxsaveopt",	OPTION_MASK_ISA_XSAVEOPT },
  };
  /* Flag options.  */
  static struct ix86_target_opts flag_opts[] =
  {
    { "-m128bit-long-double",		MASK_128BIT_LONG_DOUBLE },
    { "-mlong-double-64",		MASK_LONG_DOUBLE_64 },
    { "-m80387",			MASK_80387 },
    { "-maccumulate-outgoing-args",	MASK_ACCUMULATE_OUTGOING_ARGS },
    { "-malign-double",			MASK_ALIGN_DOUBLE },
    { "-mcld",				MASK_CLD },
    { "-mfp-ret-in-387",		MASK_FLOAT_RETURNS },
    { "-mieee-fp",			MASK_IEEE_FP },
    { "-minline-all-stringops",		MASK_INLINE_ALL_STRINGOPS },
    { "-minline-stringops-dynamically",	MASK_INLINE_STRINGOPS_DYNAMICALLY },
    { "-mms-bitfields",			MASK_MS_BITFIELD_LAYOUT },
    { "-mno-align-stringops",		MASK_NO_ALIGN_STRINGOPS },
    { "-mno-fancy-math-387",		MASK_NO_FANCY_MATH_387 },
    { "-mno-push-args",			MASK_NO_PUSH_ARGS },
    { "-mno-red-zone",			MASK_NO_RED_ZONE },
    { "-momit-leaf-frame-pointer",	MASK_OMIT_LEAF_FRAME_POINTER },
    { "-mrecip",			MASK_RECIP },
    { "-mrtd",				MASK_RTD },
    { "-msseregparm",			MASK_SSEREGPARM },
    { "-mstack-arg-probe",		MASK_STACK_PROBE },
    { "-mtls-direct-seg-refs",		MASK_TLS_DIRECT_SEG_REFS },
    { "-mvect8-ret-in-mem",		MASK_VECT8_RETURNS },
    { "-m8bit-idiv",			MASK_USE_8BIT_IDIV },
    { "-mvzeroupper",			MASK_VZEROUPPER },
    { "-mavx256-split-unaligned-load",	MASK_AVX256_SPLIT_UNALIGNED_LOAD },
    { "-mavx256-split-unaligned-store",	MASK_AVX256_SPLIT_UNALIGNED_STORE },
    { "-mprefer-avx128",		MASK_PREFER_AVX128 },
  };
  const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];

  char isa_other[40];
  char target_other[40];
  const char *abi;
  unsigned num = 0;
  unsigned i, j;
  char *ret;
  char *ptr;
  size_t len;
  size_t line_len;
  size_t sep_len;

  memset (opts, '\0', sizeof (opts));
  /* Add -march= option.  */
  if (arch)
    {
      opts[num][0] = "-march=";
      opts[num++][1] = arch;
    }

  /* Add -mtune= option.  */
  if (tune)
    {
      opts[num][0] = "-mtune=";
      opts[num++][1] = tune;
    }

  /* Add -m32/-m64/-mx32.  */
  if ((isa & OPTION_MASK_ISA_64BIT) != 0)
    {
      if ((isa & OPTION_MASK_ABI_64) != 0)
	abi = "-m64";
      else
	abi = "-mx32";
      isa &= ~ (OPTION_MASK_ISA_64BIT
		| OPTION_MASK_ABI_64
		| OPTION_MASK_ABI_X32);
    }
  else
    abi = "-m32";
  opts[num++][0] = abi;
  /* Pick out the options in isa options.  */
  for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
    {
      if ((isa & isa_opts[i].mask) != 0)
	{
	  opts[num++][0] = isa_opts[i].option;
	  isa &= ~ isa_opts[i].mask;
	}
    }

  if (isa && add_nl_p)
    {
      opts[num++][0] = isa_other;
      sprintf (isa_other, "(other isa: %#" HOST_WIDE_INT_PRINT "x)",
	       isa);
    }
  /* Add flag options.  */
  for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
    {
      if ((flags & flag_opts[i].mask) != 0)
	{
	  opts[num++][0] = flag_opts[i].option;
	  flags &= ~ flag_opts[i].mask;
	}
    }

  if (flags && add_nl_p)
    {
      opts[num++][0] = target_other;
      sprintf (target_other, "(other flags: %#x)", flags);
    }
  /* Add -fpmath= option.  */
  if (fpmath)
    {
      opts[num][0] = "-mfpmath=";
      switch ((int) fpmath)
	{
	case FPMATH_387:
	  opts[num++][1] = "387";
	  break;

	case FPMATH_SSE:
	  opts[num++][1] = "sse";
	  break;

	case FPMATH_387 | FPMATH_SSE:
	  opts[num++][1] = "sse+387";
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (num < ARRAY_SIZE (opts));
  /* Size the string.  */
  len = 0;
  sep_len = (add_nl_p) ? 3 : 1;
  for (i = 0; i < num; i++)
    {
      len += sep_len;
      for (j = 0; j < 2; j++)
	if (opts[i][j])
	  len += strlen (opts[i][j]);
    }

  /* Build the string.  */
  ret = ptr = (char *) xmalloc (len);
  line_len = 0;

  for (i = 0; i < num; i++)
    {
      size_t len2[2];

      for (j = 0; j < 2; j++)
	len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;

      if (i != 0)
	{
	  *ptr++ = ' ';
	  line_len++;

	  if (add_nl_p && line_len + len2[0] + len2[1] > 70)
	    {
	      *ptr++ = '\\';
	      *ptr++ = '\n';
	      line_len = 0;
	    }
	}

      for (j = 0; j < 2; j++)
	if (opts[i][j])
	  {
	    memcpy (ptr, opts[i][j], len2[j]);
	    ptr += len2[j];
	    line_len += len2[j];
	  }
    }

  *ptr = '\0';
  gcc_assert (ret + len >= ptr);

  return ret;
}
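/* For exposition: a call such as
     ix86_target_string (isa, flags, "k8", "k8", FPMATH_SSE, true)
   returns a heap-allocated string along the lines of
     "-march=k8 -mtune=k8 -m64 -msse2 -msse -mmmx -mfpmath=sse"
   (hypothetical output; the exact option set depends on the masks).  */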
/* Return true if profiling code should be emitted before the prologue,
   and false otherwise.
   Note: for x86, unsupported "hotfix" (-mfentry) combinations are
   rejected with sorry () during option override.  */

static bool
ix86_profile_before_prologue (void)
{
  return flag_fentry != 0;
}

/* Function that is callable from the debugger to print the current
   options.  */

void ATTRIBUTE_UNUSED
ix86_debug_options (void)
{
  char *opts = ix86_target_string (ix86_isa_flags, target_flags,
				   ix86_arch_string, ix86_tune_string,
				   ix86_fpmath, true);

  if (opts)
    {
      fprintf (stderr, "%s\n\n", opts);
      free (opts);
    }
  else
    fputs ("<no options>\n\n", stderr);
}
/* Override various settings based on options.  If MAIN_ARGS_P, the
   options are from the command line, otherwise they are from
   attributes.  */

static void
ix86_option_override_internal (bool main_args_p)
{
  int i;
  unsigned int ix86_arch_mask, ix86_tune_mask;
  const bool ix86_tune_specified = (ix86_tune_string != NULL);
  const char *prefix;
  const char *suffix;
  const char *sw;
#define PTA_3DNOW		(HOST_WIDE_INT_1 << 0)
#define PTA_3DNOW_A		(HOST_WIDE_INT_1 << 1)
#define PTA_64BIT		(HOST_WIDE_INT_1 << 2)
#define PTA_ABM			(HOST_WIDE_INT_1 << 3)
#define PTA_AES			(HOST_WIDE_INT_1 << 4)
#define PTA_AVX			(HOST_WIDE_INT_1 << 5)
#define PTA_BMI			(HOST_WIDE_INT_1 << 6)
#define PTA_CX16		(HOST_WIDE_INT_1 << 7)
#define PTA_F16C		(HOST_WIDE_INT_1 << 8)
#define PTA_FMA			(HOST_WIDE_INT_1 << 9)
#define PTA_FMA4		(HOST_WIDE_INT_1 << 10)
#define PTA_FSGSBASE		(HOST_WIDE_INT_1 << 11)
#define PTA_LWP			(HOST_WIDE_INT_1 << 12)
#define PTA_LZCNT		(HOST_WIDE_INT_1 << 13)
#define PTA_MMX			(HOST_WIDE_INT_1 << 14)
#define PTA_MOVBE		(HOST_WIDE_INT_1 << 15)
#define PTA_NO_SAHF		(HOST_WIDE_INT_1 << 16)
#define PTA_PCLMUL		(HOST_WIDE_INT_1 << 17)
#define PTA_POPCNT		(HOST_WIDE_INT_1 << 18)
#define PTA_PREFETCH_SSE	(HOST_WIDE_INT_1 << 19)
#define PTA_RDRND		(HOST_WIDE_INT_1 << 20)
#define PTA_SSE			(HOST_WIDE_INT_1 << 21)
#define PTA_SSE2		(HOST_WIDE_INT_1 << 22)
#define PTA_SSE3		(HOST_WIDE_INT_1 << 23)
#define PTA_SSE4_1		(HOST_WIDE_INT_1 << 24)
#define PTA_SSE4_2		(HOST_WIDE_INT_1 << 25)
#define PTA_SSE4A		(HOST_WIDE_INT_1 << 26)
#define PTA_SSSE3		(HOST_WIDE_INT_1 << 27)
#define PTA_TBM			(HOST_WIDE_INT_1 << 28)
#define PTA_XOP			(HOST_WIDE_INT_1 << 29)
#define PTA_AVX2		(HOST_WIDE_INT_1 << 30)
#define PTA_BMI2		(HOST_WIDE_INT_1 << 31)
#define PTA_RTM			(HOST_WIDE_INT_1 << 32)
#define PTA_HLE			(HOST_WIDE_INT_1 << 33)
#define PTA_PRFCHW		(HOST_WIDE_INT_1 << 34)
#define PTA_RDSEED		(HOST_WIDE_INT_1 << 35)
#define PTA_ADX			(HOST_WIDE_INT_1 << 36)
#define PTA_FXSR		(HOST_WIDE_INT_1 << 37)
#define PTA_XSAVE		(HOST_WIDE_INT_1 << 38)
#define PTA_XSAVEOPT		(HOST_WIDE_INT_1 << 39)

/* if this reaches 64, need to widen struct pta flags below */
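  /* Each PTA_* value is a single bit in the 64-bit flags word of a
     processor_alias_table entry (declared below); a new ISA extension gets
     the next free bit, e.g. (hypothetical)
       #define PTA_NEWEXT (HOST_WIDE_INT_1 << 40)
     which is why the comment above warns about running out at 64 bits.  */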
  static struct pta
    {
      const char *const name;		/* processor name or nickname.  */
      const enum processor_type processor;
      const enum attr_cpu schedule;
      const unsigned HOST_WIDE_INT flags;
    }
  const processor_alias_table[] =
    {
2874 {"i386", PROCESSOR_I386
, CPU_NONE
, 0},
2875 {"i486", PROCESSOR_I486
, CPU_NONE
, 0},
2876 {"i586", PROCESSOR_PENTIUM
, CPU_PENTIUM
, 0},
2877 {"pentium", PROCESSOR_PENTIUM
, CPU_PENTIUM
, 0},
2878 {"pentium-mmx", PROCESSOR_PENTIUM
, CPU_PENTIUM
, PTA_MMX
},
2879 {"winchip-c6", PROCESSOR_I486
, CPU_NONE
, PTA_MMX
},
2880 {"winchip2", PROCESSOR_I486
, CPU_NONE
, PTA_MMX
| PTA_3DNOW
},
2881 {"c3", PROCESSOR_I486
, CPU_NONE
, PTA_MMX
| PTA_3DNOW
},
2882 {"c3-2", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, PTA_MMX
| PTA_SSE
},
2883 {"i686", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, 0},
2884 {"pentiumpro", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, 0},
2885 {"pentium2", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, PTA_MMX
| PTA_FXSR
},
2886 {"pentium3", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
,
2887 PTA_MMX
| PTA_SSE
| PTA_FXSR
},
2888 {"pentium3m", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
,
2889 PTA_MMX
| PTA_SSE
| PTA_FXSR
},
2890 {"pentium-m", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
,
2891 PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_FXSR
},
2892 {"pentium4", PROCESSOR_PENTIUM4
, CPU_NONE
,
2893 PTA_MMX
|PTA_SSE
| PTA_SSE2
| PTA_FXSR
},
2894 {"pentium4m", PROCESSOR_PENTIUM4
, CPU_NONE
,
2895 PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_FXSR
},
2896 {"prescott", PROCESSOR_NOCONA
, CPU_NONE
,
2897 PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
| PTA_FXSR
},
2898 {"nocona", PROCESSOR_NOCONA
, CPU_NONE
,
2899 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2900 | PTA_CX16
| PTA_NO_SAHF
| PTA_FXSR
},
2901 {"core2", PROCESSOR_CORE2
, CPU_CORE2
,
2902 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2903 | PTA_SSSE3
| PTA_CX16
| PTA_FXSR
},
2904 {"corei7", PROCESSOR_COREI7
, CPU_COREI7
,
2905 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2906 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_CX16
| PTA_FXSR
},
2907 {"corei7-avx", PROCESSOR_COREI7
, CPU_COREI7
,
2908 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2909 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_AVX
2910 | PTA_CX16
| PTA_POPCNT
| PTA_AES
| PTA_PCLMUL
2911 | PTA_FXSR
| PTA_XSAVE
| PTA_XSAVEOPT
},
2912 {"core-avx-i", PROCESSOR_COREI7
, CPU_COREI7
,
2913 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2914 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_AVX
2915 | PTA_CX16
| PTA_POPCNT
| PTA_AES
| PTA_PCLMUL
| PTA_FSGSBASE
2916 | PTA_RDRND
| PTA_F16C
| PTA_FXSR
| PTA_XSAVE
| PTA_XSAVEOPT
},
2917 {"core-avx2", PROCESSOR_HASWELL
, CPU_COREI7
,
2918 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2919 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_AVX
| PTA_AVX2
2920 | PTA_CX16
| PTA_POPCNT
| PTA_AES
| PTA_PCLMUL
| PTA_FSGSBASE
2921 | PTA_RDRND
| PTA_F16C
| PTA_BMI
| PTA_BMI2
| PTA_LZCNT
2922 | PTA_FMA
| PTA_MOVBE
| PTA_RTM
| PTA_HLE
| PTA_FXSR
| PTA_XSAVE
2924 {"atom", PROCESSOR_ATOM
, CPU_ATOM
,
2925 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2926 | PTA_SSSE3
| PTA_CX16
| PTA_MOVBE
| PTA_FXSR
},
2927 {"geode", PROCESSOR_GEODE
, CPU_GEODE
,
2928 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_PREFETCH_SSE
},
2929 {"k6", PROCESSOR_K6
, CPU_K6
, PTA_MMX
},
2930 {"k6-2", PROCESSOR_K6
, CPU_K6
, PTA_MMX
| PTA_3DNOW
},
2931 {"k6-3", PROCESSOR_K6
, CPU_K6
, PTA_MMX
| PTA_3DNOW
},
2932 {"athlon", PROCESSOR_ATHLON
, CPU_ATHLON
,
2933 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_PREFETCH_SSE
},
2934 {"athlon-tbird", PROCESSOR_ATHLON
, CPU_ATHLON
,
2935 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_PREFETCH_SSE
},
2936 {"athlon-4", PROCESSOR_ATHLON
, CPU_ATHLON
,
2937 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
},
2938 {"athlon-xp", PROCESSOR_ATHLON
, CPU_ATHLON
,
2939 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
},
2940 {"athlon-mp", PROCESSOR_ATHLON
, CPU_ATHLON
,
2941 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
},
2942 {"x86-64", PROCESSOR_K8
, CPU_K8
,
2943 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_NO_SAHF
},
2944 {"k8", PROCESSOR_K8
, CPU_K8
,
2945 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2946 | PTA_SSE2
| PTA_NO_SAHF
},
2947 {"k8-sse3", PROCESSOR_K8
, CPU_K8
,
2948 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2949 | PTA_SSE2
| PTA_SSE3
| PTA_NO_SAHF
},
2950 {"opteron", PROCESSOR_K8
, CPU_K8
,
2951 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2952 | PTA_SSE2
| PTA_NO_SAHF
},
2953 {"opteron-sse3", PROCESSOR_K8
, CPU_K8
,
2954 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2955 | PTA_SSE2
| PTA_SSE3
| PTA_NO_SAHF
},
2956 {"athlon64", PROCESSOR_K8
, CPU_K8
,
2957 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2958 | PTA_SSE2
| PTA_NO_SAHF
},
2959 {"athlon64-sse3", PROCESSOR_K8
, CPU_K8
,
2960 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2961 | PTA_SSE2
| PTA_SSE3
| PTA_NO_SAHF
},
2962 {"athlon-fx", PROCESSOR_K8
, CPU_K8
,
2963 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2964 | PTA_SSE2
| PTA_NO_SAHF
},
2965 {"amdfam10", PROCESSOR_AMDFAM10
, CPU_AMDFAM10
,
2966 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2967 | PTA_SSE2
| PTA_SSE3
| PTA_SSE4A
| PTA_CX16
| PTA_ABM
},
2968 {"barcelona", PROCESSOR_AMDFAM10
, CPU_AMDFAM10
,
2969 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2970 | PTA_SSE2
| PTA_SSE3
| PTA_SSE4A
| PTA_CX16
| PTA_ABM
},
2971 {"bdver1", PROCESSOR_BDVER1
, CPU_BDVER1
,
2972 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2973 | PTA_SSE4A
| PTA_CX16
| PTA_ABM
| PTA_SSSE3
| PTA_SSE4_1
2974 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
| PTA_FMA4
2975 | PTA_XOP
| PTA_LWP
| PTA_PRFCHW
| PTA_FXSR
| PTA_XSAVE
},
2976 {"bdver2", PROCESSOR_BDVER2
, CPU_BDVER2
,
2977 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2978 | PTA_SSE4A
| PTA_CX16
| PTA_ABM
| PTA_SSSE3
| PTA_SSE4_1
2979 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
| PTA_FMA4
2980 | PTA_XOP
| PTA_LWP
| PTA_BMI
| PTA_TBM
| PTA_F16C
2981 | PTA_FMA
| PTA_PRFCHW
| PTA_FXSR
| PTA_XSAVE
},
2982 {"bdver3", PROCESSOR_BDVER3
, CPU_BDVER3
,
2983 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2984 | PTA_SSE4A
| PTA_CX16
| PTA_ABM
| PTA_SSSE3
| PTA_SSE4_1
2985 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
2986 | PTA_XOP
| PTA_LWP
| PTA_BMI
| PTA_TBM
| PTA_F16C
2987 | PTA_FMA
| PTA_PRFCHW
| PTA_FXSR
| PTA_XSAVE
2989 {"btver1", PROCESSOR_BTVER1
, CPU_GENERIC64
,
2990 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2991 | PTA_SSSE3
| PTA_SSE4A
|PTA_ABM
| PTA_CX16
| PTA_PRFCHW
2992 | PTA_FXSR
| PTA_XSAVE
},
2993 {"btver2", PROCESSOR_BTVER2
, CPU_BTVER2
,
2994 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2995 | PTA_SSSE3
| PTA_SSE4A
|PTA_ABM
| PTA_CX16
| PTA_SSE4_1
2996 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
2997 | PTA_BMI
| PTA_F16C
| PTA_MOVBE
| PTA_PRFCHW
2998 | PTA_FXSR
| PTA_XSAVE
| PTA_XSAVEOPT
},
3000 {"generic32", PROCESSOR_GENERIC32
, CPU_PENTIUMPRO
,
3001 PTA_HLE
/* flags are only used for -march switch. */ },
3002 {"generic64", PROCESSOR_GENERIC64
, CPU_GENERIC64
,
3004 | PTA_HLE
/* flags are only used for -march switch. */ },
  /* -mrecip options.  */
  static struct
    {
      const char *string;		/* option name */
      unsigned int mask;		/* mask bits to set */
    }
  const recip_options[] =
    {
      { "all",       RECIP_MASK_ALL },
      { "none",      RECIP_MASK_NONE },
      { "div",       RECIP_MASK_DIV },
      { "sqrt",      RECIP_MASK_SQRT },
      { "vec-div",   RECIP_MASK_VEC_DIV },
      { "vec-sqrt",  RECIP_MASK_VEC_SQRT },
    };

  int const pta_size = ARRAY_SIZE (processor_alias_table);
  /* Set up prefix/suffix so the error messages refer to either the command
     line argument, or the attribute(target).  */
  if (main_args_p)
    {
      prefix = "-m";
      suffix = "";
      sw = "switch";
    }
  else
    {
      prefix = "option(\"";
      suffix = "\")";
      sw = "attribute";
    }
  /* Turn off both OPTION_MASK_ABI_64 and OPTION_MASK_ABI_X32 if
     TARGET_64BIT_DEFAULT is true and TARGET_64BIT is false.  */
  if (TARGET_64BIT_DEFAULT && !TARGET_64BIT)
    ix86_isa_flags &= ~(OPTION_MASK_ABI_64 | OPTION_MASK_ABI_X32);
#ifdef TARGET_BI_ARCH
  else
    {
#if TARGET_BI_ARCH == 1
      /* When TARGET_BI_ARCH == 1, by default, OPTION_MASK_ABI_64
	 is on and OPTION_MASK_ABI_X32 is off.  We turn off
	 OPTION_MASK_ABI_64 if OPTION_MASK_ABI_X32 is turned on by
	 -mx32.  */
      if (TARGET_X32)
	ix86_isa_flags &= ~OPTION_MASK_ABI_64;
#else
      /* When TARGET_BI_ARCH == 2, by default, OPTION_MASK_ABI_X32 is
	 on and OPTION_MASK_ABI_64 is off.  We turn off
	 OPTION_MASK_ABI_X32 if OPTION_MASK_ABI_64 is turned on by
	 -m64.  */
      if (TARGET_LP64)
	ix86_isa_flags &= ~OPTION_MASK_ABI_X32;
#endif
    }
#endif

  if (TARGET_X32)
    {
      /* Always turn on OPTION_MASK_ISA_64BIT and turn off
	 OPTION_MASK_ABI_64 for TARGET_X32.  */
      ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
      ix86_isa_flags &= ~OPTION_MASK_ABI_64;
    }
  else if (TARGET_LP64)
    {
      /* Always turn on OPTION_MASK_ISA_64BIT and turn off
	 OPTION_MASK_ABI_X32 for TARGET_LP64.  */
      ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
      ix86_isa_flags &= ~OPTION_MASK_ABI_X32;
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
  /* -fPIC is the default for x86_64.  */
  if (TARGET_MACHO && TARGET_64BIT)
    flag_pic = 2;
  /* Need to check -mtune=generic first.  */
  if (ix86_tune_string)
    {
      if (!strcmp (ix86_tune_string, "generic")
	  || !strcmp (ix86_tune_string, "i686")
	  /* As special support for cross compilers we read -mtune=native
	     as -mtune=generic.  With native compilers we won't see the
	     -mtune=native, as it was changed by the driver.  */
	  || !strcmp (ix86_tune_string, "native"))
	{
	  if (TARGET_64BIT)
	    ix86_tune_string = "generic64";
	  else
	    ix86_tune_string = "generic32";
	}
      /* If this call is for setting the option attribute, allow the
	 generic32/generic64 that was previously set.  */
      else if (!main_args_p
	       && (!strcmp (ix86_tune_string, "generic32")
		   || !strcmp (ix86_tune_string, "generic64")))
	;
      else if (!strncmp (ix86_tune_string, "generic", 7))
	error ("bad value (%s) for %stune=%s %s",
	       ix86_tune_string, prefix, suffix, sw);
      else if (!strcmp (ix86_tune_string, "x86-64"))
	warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
		 "%stune=k8%s or %stune=generic%s instead as appropriate",
		 prefix, suffix, prefix, suffix, prefix, suffix);
    }
  else
    {
      if (ix86_arch_string)
	ix86_tune_string = ix86_arch_string;
      if (!ix86_tune_string)
	{
	  ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
	  ix86_tune_defaulted = 1;
	}

      /* ix86_tune_string is set to ix86_arch_string or defaulted.  We
	 need to use a sensible tune option.  */
      if (!strcmp (ix86_tune_string, "generic")
	  || !strcmp (ix86_tune_string, "x86-64")
	  || !strcmp (ix86_tune_string, "i686"))
	{
	  if (TARGET_64BIT)
	    ix86_tune_string = "generic64";
	  else
	    ix86_tune_string = "generic32";
	}
    }
  if (ix86_stringop_alg == rep_prefix_8_byte && !TARGET_64BIT)
    {
      /* rep; movq isn't available in 32-bit code.  */
      error ("-mstringop-strategy=rep_8byte not supported for 32-bit code");
      ix86_stringop_alg = no_stringop;
    }
  if (!ix86_arch_string)
    ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
  else
    ix86_arch_specified = 1;
  if (global_options_set.x_ix86_pmode)
    {
      if ((TARGET_LP64 && ix86_pmode == PMODE_SI)
          || (!TARGET_64BIT && ix86_pmode == PMODE_DI))
        error ("address mode %qs not supported in the %s bit mode",
               TARGET_64BIT ? "short" : "long",
               TARGET_64BIT ? "64" : "32");
    }
  else
    ix86_pmode = TARGET_LP64 ? PMODE_DI : PMODE_SI;

  if (!global_options_set.x_ix86_abi)
    ix86_abi = DEFAULT_ABI;
  if (global_options_set.x_ix86_cmodel)
    {
      switch (ix86_cmodel)
        {
        case CM_SMALL:
        case CM_SMALL_PIC:
          if (flag_pic)
            ix86_cmodel = CM_SMALL_PIC;
          if (!TARGET_64BIT)
            error ("code model %qs not supported in the %s bit mode",
                   "small", "32");
          break;

        case CM_MEDIUM:
        case CM_MEDIUM_PIC:
          if (flag_pic)
            ix86_cmodel = CM_MEDIUM_PIC;
          if (!TARGET_64BIT)
            error ("code model %qs not supported in the %s bit mode",
                   "medium", "32");
          else if (TARGET_X32)
            error ("code model %qs not supported in x32 mode",
                   "medium");
          break;

        case CM_LARGE:
        case CM_LARGE_PIC:
          if (flag_pic)
            ix86_cmodel = CM_LARGE_PIC;
          if (!TARGET_64BIT)
            error ("code model %qs not supported in the %s bit mode",
                   "large", "32");
          else if (TARGET_X32)
            error ("code model %qs not supported in x32 mode",
                   "large");
          break;

        case CM_32:
          if (flag_pic)
            error ("code model %s does not support PIC mode", "32");
          if (TARGET_64BIT)
            error ("code model %qs not supported in the %s bit mode",
                   "32", "64");
          break;

        case CM_KERNEL:
          if (flag_pic)
            {
              error ("code model %s does not support PIC mode", "kernel");
              ix86_cmodel = CM_32;
            }
          if (!TARGET_64BIT)
            error ("code model %qs not supported in the %s bit mode",
                   "kernel", "32");
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
         use of rip-relative addressing.  This eliminates fixups that
         would otherwise be needed if this object is to be placed in a
         DLL, and is essentially just as efficient as direct addressing.  */
      if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
        ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
      else if (TARGET_64BIT && TARGET_RDOS)
        ix86_cmodel = CM_MEDIUM_PIC, flag_pic = 1;
      else if (TARGET_64BIT)
        ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else
        ix86_cmodel = CM_32;
    }
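  /* The x86-64 code models differ in reach assumptions: "small" keeps code
     and data within 2GB so 32-bit RIP-relative offsets suffice; "medium"
     keeps code small but lets data grow large (see the .ldata/.lbss
     handling later in this file); "large" makes no reach assumptions.  */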
  if (TARGET_MACHO && ix86_asm_dialect == ASM_INTEL)
    {
      error ("-masm=intel not supported in this configuration");
      ix86_asm_dialect = ASM_ATT;
    }
  if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
    sorry ("%i-bit mode not compiled in",
           (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      {
        ix86_schedule = processor_alias_table[i].schedule;
        ix86_arch = processor_alias_table[i].processor;
        /* Default cpu tuning to the architecture.  */
        ix86_tune = ix86_arch;

        if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
          error ("CPU you selected does not support x86-64 "
                 "instruction set");
        if (processor_alias_table[i].flags & PTA_MMX
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
          ix86_isa_flags |= OPTION_MASK_ISA_MMX;
        if (processor_alias_table[i].flags & PTA_3DNOW
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
          ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
        if (processor_alias_table[i].flags & PTA_3DNOW_A
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
          ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
        if (processor_alias_table[i].flags & PTA_SSE
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
          ix86_isa_flags |= OPTION_MASK_ISA_SSE;
        if (processor_alias_table[i].flags & PTA_SSE2
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
          ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
        if (processor_alias_table[i].flags & PTA_SSE3
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
          ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
        if (processor_alias_table[i].flags & PTA_SSSE3
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
          ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
        if (processor_alias_table[i].flags & PTA_SSE4_1
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
          ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
        if (processor_alias_table[i].flags & PTA_SSE4_2
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
          ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
        if (processor_alias_table[i].flags & PTA_AVX
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
          ix86_isa_flags |= OPTION_MASK_ISA_AVX;
        if (processor_alias_table[i].flags & PTA_AVX2
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX2))
          ix86_isa_flags |= OPTION_MASK_ISA_AVX2;
        if (processor_alias_table[i].flags & PTA_FMA
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
          ix86_isa_flags |= OPTION_MASK_ISA_FMA;
        if (processor_alias_table[i].flags & PTA_SSE4A
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
          ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
        if (processor_alias_table[i].flags & PTA_FMA4
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
          ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
        if (processor_alias_table[i].flags & PTA_XOP
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
          ix86_isa_flags |= OPTION_MASK_ISA_XOP;
        if (processor_alias_table[i].flags & PTA_LWP
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
          ix86_isa_flags |= OPTION_MASK_ISA_LWP;
        if (processor_alias_table[i].flags & PTA_ABM
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
          ix86_isa_flags |= OPTION_MASK_ISA_ABM;
        if (processor_alias_table[i].flags & PTA_BMI
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
          ix86_isa_flags |= OPTION_MASK_ISA_BMI;
        if (processor_alias_table[i].flags & (PTA_LZCNT | PTA_ABM)
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LZCNT))
          ix86_isa_flags |= OPTION_MASK_ISA_LZCNT;
        if (processor_alias_table[i].flags & PTA_TBM
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
          ix86_isa_flags |= OPTION_MASK_ISA_TBM;
        if (processor_alias_table[i].flags & PTA_BMI2
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI2))
          ix86_isa_flags |= OPTION_MASK_ISA_BMI2;
        if (processor_alias_table[i].flags & PTA_CX16
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
          ix86_isa_flags |= OPTION_MASK_ISA_CX16;
        if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
          ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
        if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
          ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
        if (processor_alias_table[i].flags & PTA_MOVBE
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
          ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
        if (processor_alias_table[i].flags & PTA_AES
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
          ix86_isa_flags |= OPTION_MASK_ISA_AES;
        if (processor_alias_table[i].flags & PTA_PCLMUL
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
          ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
        if (processor_alias_table[i].flags & PTA_FSGSBASE
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
          ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
        if (processor_alias_table[i].flags & PTA_RDRND
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
          ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
        if (processor_alias_table[i].flags & PTA_F16C
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
          ix86_isa_flags |= OPTION_MASK_ISA_F16C;
        if (processor_alias_table[i].flags & PTA_RTM
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RTM))
          ix86_isa_flags |= OPTION_MASK_ISA_RTM;
        if (processor_alias_table[i].flags & PTA_HLE
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_HLE))
          ix86_isa_flags |= OPTION_MASK_ISA_HLE;
        if (processor_alias_table[i].flags & PTA_PRFCHW
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PRFCHW))
          ix86_isa_flags |= OPTION_MASK_ISA_PRFCHW;
        if (processor_alias_table[i].flags & PTA_RDSEED
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDSEED))
          ix86_isa_flags |= OPTION_MASK_ISA_RDSEED;
        if (processor_alias_table[i].flags & PTA_ADX
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ADX))
          ix86_isa_flags |= OPTION_MASK_ISA_ADX;
        if (processor_alias_table[i].flags & PTA_FXSR
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FXSR))
          ix86_isa_flags |= OPTION_MASK_ISA_FXSR;
        if (processor_alias_table[i].flags & PTA_XSAVE
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVE))
          ix86_isa_flags |= OPTION_MASK_ISA_XSAVE;
        if (processor_alias_table[i].flags & PTA_XSAVEOPT
            && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVEOPT))
          ix86_isa_flags |= OPTION_MASK_ISA_XSAVEOPT;
        if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
          x86_prefetch_sse = true;

        break;
      }
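  /* Net effect of the loop above: -march=CPU implies every ISA extension
     that CPU supports, while any explicit -m/-mno- option always wins
     because it is recorded in ix86_isa_flags_explicit; for example,
     "-march=corei7 -mno-sse4.2" leaves SSE4.2 disabled.  */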
  if (!strcmp (ix86_arch_string, "generic"))
    error ("generic CPU can be used only for %stune=%s %s",
           prefix, suffix, sw);
  else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
    error ("bad value (%s) for %sarch=%s %s",
           ix86_arch_string, prefix, suffix, sw);
  ix86_arch_mask = 1u << ix86_arch;
  for (i = 0; i < X86_ARCH_LAST; ++i)
    ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
      {
        ix86_schedule = processor_alias_table[i].schedule;
        ix86_tune = processor_alias_table[i].processor;
        if (TARGET_64BIT)
          {
            if (!(processor_alias_table[i].flags & PTA_64BIT))
              {
                if (ix86_tune_defaulted)
                  {
                    ix86_tune_string = "x86-64";
                    for (i = 0; i < pta_size; i++)
                      if (! strcmp (ix86_tune_string,
                                    processor_alias_table[i].name))
                        break;
                    ix86_schedule = processor_alias_table[i].schedule;
                    ix86_tune = processor_alias_table[i].processor;
                  }
                else
                  error ("CPU you selected does not support x86-64 "
                         "instruction set");
              }
          }
        else
          {
            /* Adjust tuning when compiling for 32-bit ABI.  */
            switch (ix86_tune)
              {
              case PROCESSOR_GENERIC64:
                ix86_tune = PROCESSOR_GENERIC32;
                ix86_schedule = CPU_PENTIUMPRO;
                break;

              default:
                break;
              }
          }
        /* Intel CPUs have always interpreted SSE prefetch instructions as
           NOPs; so, we can enable SSE prefetch instructions even when
           -mtune (rather than -march) points us to a processor that has them.
           However, the VIA C3 gives a SIGILL, so we only do that for i686 and
           higher processors.  */
        if (TARGET_CMOV
            && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
          x86_prefetch_sse = true;

        break;
      }

  if (ix86_tune_specified && i == pta_size)
    error ("bad value (%s) for %stune=%s %s",
           ix86_tune_string, prefix, suffix, sw);
  ix86_tune_mask = 1u << ix86_tune;
  for (i = 0; i < X86_TUNE_LAST; ++i)
    ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
#ifndef USE_IX86_FRAME_POINTER
#define USE_IX86_FRAME_POINTER 0
#endif

#ifndef USE_X86_64_FRAME_POINTER
#define USE_X86_64_FRAME_POINTER 0
#endif
  /* Set the default values for switches whose default depends on TARGET_64BIT
     in case they weren't overwritten by command line options.  */
  if (TARGET_64BIT)
    {
      if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
        flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
      if (flag_asynchronous_unwind_tables == 2)
        flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
      if (flag_pcc_struct_return == 2)
        flag_pcc_struct_return = 0;
    }
  else
    {
      if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
        flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
      if (flag_asynchronous_unwind_tables == 2)
        flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
      if (flag_pcc_struct_return == 2)
        flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
    }
  ix86_tune_cost = processor_target_table[ix86_tune].cost;
  if (optimize_size)
    ix86_cost = &ix86_size_cost;
  else
    ix86_cost = ix86_tune_cost;
  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;
  /* Validate -mregparm= value.  */
  if (global_options_set.x_ix86_regparm)
    {
      if (TARGET_64BIT)
        warning (0, "-mregparm is ignored in 64-bit mode");
      if (ix86_regparm > REGPARM_MAX)
        {
          error ("-mregparm=%d is not between 0 and %d",
                 ix86_regparm, REGPARM_MAX);
          ix86_regparm = 0;
        }
    }
  if (TARGET_64BIT)
    ix86_regparm = REGPARM_MAX;
  /* Default align_* from the processor table.  */
  if (align_loops == 0)
    {
      align_loops = processor_target_table[ix86_tune].align_loop;
      align_loops_max_skip
        = processor_target_table[ix86_tune].align_loop_max_skip;
    }
  if (align_jumps == 0)
    {
      align_jumps = processor_target_table[ix86_tune].align_jump;
      align_jumps_max_skip
        = processor_target_table[ix86_tune].align_jump_max_skip;
    }
  if (align_functions == 0)
    align_functions = processor_target_table[ix86_tune].align_func;
  /* Provide default for -mbranch-cost= value.  */
  if (!global_options_set.x_ix86_branch_cost)
    ix86_branch_cost = ix86_cost->branch_cost;
  if (TARGET_64BIT)
    {
      target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;

      /* Enable by default the SSE and MMX builtins.  Do allow the user to
         explicitly disable any of these.  In particular, disabling SSE and
         MMX for kernel code is extremely useful.  */
      if (!ix86_arch_specified)
        ix86_isa_flags
          |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
               | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);

      if (TARGET_RTD)
        warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
    }
  else
    {
      target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;

      if (!ix86_arch_specified)
        ix86_isa_flags
          |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
      /* The i386 ABI does not specify a red zone.  It still makes sense to
         use one when the programmer takes care to keep the stack from being
         destroyed.  */
      if (!(target_flags_explicit & MASK_NO_RED_ZONE))
        target_flags |= MASK_NO_RED_ZONE;
    }
  /* Keep nonleaf frame pointers.  */
  if (flag_omit_frame_pointer)
    target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
  else if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;
  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (flag_finite_math_only)
    target_flags &= ~MASK_IEEE_FP;
  /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
     since the insns won't need emulation.  */
  if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
    target_flags &= ~MASK_NO_FANCY_MATH_387;
  /* Likewise, if the target doesn't have a 387, or we've specified
     software floating point, don't use 387 inline intrinsics.  */
  if (!TARGET_80387)
    target_flags |= MASK_NO_FANCY_MATH_387;
  /* Turn on MMX builtins for -msse.  */
  if (TARGET_SSE)
    ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
  /* Enable SSE prefetch.  */
  if (TARGET_SSE || TARGET_PRFCHW)
    x86_prefetch_sse = true;
  /* Turn on popcnt instruction for -msse4.2 or -mabm.  */
  if (TARGET_SSE4_2 || TARGET_ABM)
    ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
  /* Turn on lzcnt instruction for -mabm.  */
  if (TARGET_ABM)
    ix86_isa_flags |= OPTION_MASK_ISA_LZCNT & ~ix86_isa_flags_explicit;
  /* Validate -mpreferred-stack-boundary= value or default it to
     PREFERRED_STACK_BOUNDARY_DEFAULT.  */
  ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
  if (global_options_set.x_ix86_preferred_stack_boundary_arg)
    {
      int min = (TARGET_64BIT ? (TARGET_SSE ? 4 : 3) : 2);
      int max = (TARGET_SEH ? 4 : 12);

      if (ix86_preferred_stack_boundary_arg < min
          || ix86_preferred_stack_boundary_arg > max)
        {
          if (min == max)
            error ("-mpreferred-stack-boundary is not supported "
                   "for this target");
          else
            error ("-mpreferred-stack-boundary=%d is not between %d and %d",
                   ix86_preferred_stack_boundary_arg, min, max);
        }
      else
        ix86_preferred_stack_boundary
          = (1 << ix86_preferred_stack_boundary_arg) * BITS_PER_UNIT;
    }
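  /* The argument is a log2 byte count, so e.g. -mpreferred-stack-boundary=4
     requests 2**4 = 16-byte stack alignment, stored here in bits.  */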
  /* Set the default value for -mstackrealign.  */
  if (ix86_force_align_arg_pointer == -1)
    ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
  ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;

  /* Validate -mincoming-stack-boundary= value or default it to
     MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY.  */
  ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
  if (global_options_set.x_ix86_incoming_stack_boundary_arg)
    {
      if (ix86_incoming_stack_boundary_arg < (TARGET_64BIT ? 4 : 2)
          || ix86_incoming_stack_boundary_arg > 12)
        error ("-mincoming-stack-boundary=%d is not between %d and 12",
               ix86_incoming_stack_boundary_arg, TARGET_64BIT ? 4 : 2);
      else
        {
          ix86_user_incoming_stack_boundary
            = (1 << ix86_incoming_stack_boundary_arg) * BITS_PER_UNIT;
          ix86_incoming_stack_boundary
            = ix86_user_incoming_stack_boundary;
        }
    }
  /* Accept -msseregparm only if at least SSE support is enabled.  */
  if (TARGET_SSEREGPARM
      && ! TARGET_SSE)
    error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
  if (global_options_set.x_ix86_fpmath)
    {
      if (ix86_fpmath & FPMATH_SSE)
        {
          if (!TARGET_SSE)
            {
              warning (0, "SSE instruction set disabled, using 387 arithmetics");
              ix86_fpmath = FPMATH_387;
            }
          else if ((ix86_fpmath & FPMATH_387) && !TARGET_80387)
            {
              warning (0, "387 instruction set disabled, using SSE arithmetics");
              ix86_fpmath = FPMATH_SSE;
            }
        }
    }
  else
    ix86_fpmath = TARGET_FPMATH_DEFAULT;
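  /* TARGET_FPMATH_DEFAULT picks FPMATH_SSE for 64-bit targets with SSE
     (SSE2 is part of the base x86-64 ABI) and FPMATH_387 otherwise.  */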
  /* If the i387 is disabled, then do not return values in it.  */
  if (!TARGET_80387)
    target_flags &= ~MASK_FLOAT_RETURNS;
  /* Use external vectorized library in vectorizing intrinsics.  */
  if (global_options_set.x_ix86_veclibabi_type)
    switch (ix86_veclibabi_type)
      {
      case ix86_veclibabi_type_svml:
        ix86_veclib_handler = ix86_veclibabi_svml;
        break;

      case ix86_veclibabi_type_acml:
        ix86_veclib_handler = ix86_veclibabi_acml;
        break;

      default:
        gcc_unreachable ();
      }
  if ((!USE_IX86_FRAME_POINTER
       || (x86_accumulate_outgoing_args & ix86_tune_mask))
      && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
      && !optimize_size)
    target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
  /* ??? Unwind info is not correct around the CFG unless either a frame
     pointer is present or M_A_O_A is set.  Fixing this requires rewriting
     unwind info generation to be aware of the CFG and propagating states
     around edges.  */
  if ((flag_unwind_tables || flag_asynchronous_unwind_tables
       || flag_exceptions || flag_non_call_exceptions)
      && flag_omit_frame_pointer
      && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
    {
      if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
        warning (0, "unwind tables currently require either a frame pointer "
                 "or %saccumulate-outgoing-args%s for correctness",
                 prefix, suffix);
      target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
    }
  /* If stack probes are required, the space used for large function
     arguments on the stack must also be probed, so enable
     -maccumulate-outgoing-args so this happens in the prologue.  */
  if (TARGET_STACK_PROBE
      && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
    {
      if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
        warning (0, "stack probing requires %saccumulate-outgoing-args%s "
                 "for correctness", prefix, suffix);
      target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
    }
  /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix.  */
  {
    char *p;
    ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
    p = strchr (internal_label_prefix, 'X');
    internal_label_prefix_len = p - internal_label_prefix;
    *p = '\0';
  }
  /* When scheduling description is not available, disable scheduler pass
     so it won't slow down the compilation and make x87 code slower.  */
  if (!TARGET_SCHEDULE)
    flag_schedule_insns_after_reload = flag_schedule_insns = 0;
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         ix86_tune_cost->simultaneous_prefetches,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         ix86_tune_cost->prefetch_block,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE,
                         ix86_tune_cost->l1_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE,
                         ix86_tune_cost->l2_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* Enable software prefetching at -O3 for CPUs where prefetching is
     helpful.  */
  if (flag_prefetch_loop_arrays < 0
      && HAVE_prefetch
      && (optimize >= 3 || flag_profile_use)
      && TARGET_SOFTWARE_PREFETCHING_BENEFICIAL)
    flag_prefetch_loop_arrays = 1;
  /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
     can be optimized to ap = __builtin_next_arg (0).  */
  if (!TARGET_64BIT && !flag_split_stack)
    targetm.expand_builtin_va_start = NULL;
  if (TARGET_64BIT)
    {
      ix86_gen_leave = gen_leave_rex64;
      if (Pmode == DImode)
        {
          ix86_gen_monitor = gen_sse3_monitor64_di;
          ix86_gen_tls_global_dynamic_64 = gen_tls_global_dynamic_64_di;
          ix86_gen_tls_local_dynamic_base_64
            = gen_tls_local_dynamic_base_64_di;
        }
      else
        {
          ix86_gen_monitor = gen_sse3_monitor64_si;
          ix86_gen_tls_global_dynamic_64 = gen_tls_global_dynamic_64_si;
          ix86_gen_tls_local_dynamic_base_64
            = gen_tls_local_dynamic_base_64_si;
        }
    }
  else
    {
      ix86_gen_leave = gen_leave;
      ix86_gen_monitor = gen_sse3_monitor;
    }
  if (Pmode == DImode)
    {
      ix86_gen_add3 = gen_adddi3;
      ix86_gen_sub3 = gen_subdi3;
      ix86_gen_sub3_carry = gen_subdi3_carry;
      ix86_gen_one_cmpl2 = gen_one_cmpldi2;
      ix86_gen_andsp = gen_anddi3;
      ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
      ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
      ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
    }
  else
    {
      ix86_gen_add3 = gen_addsi3;
      ix86_gen_sub3 = gen_subsi3;
      ix86_gen_sub3_carry = gen_subsi3_carry;
      ix86_gen_one_cmpl2 = gen_one_cmplsi2;
      ix86_gen_andsp = gen_andsi3;
      ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
      ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
      ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
    }
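  /* Pmode is DImode for LP64 but (by default) SImode for x32, which is why
     the Pmode checks above are not redundant with TARGET_64BIT.  */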
#ifdef USE_IX86_CLD
  /* Use -mcld by default for 32-bit code if configured with --enable-cld.  */
  if (!TARGET_64BIT)
    target_flags |= MASK_CLD & ~target_flags_explicit;
#endif
  if (!TARGET_64BIT && flag_pic)
    {
      if (flag_fentry > 0)
        sorry ("-mfentry isn%'t supported for 32-bit in combination "
               "with -fpic");
      flag_fentry = 0;
    }
  else if (TARGET_SEH)
    {
      if (flag_fentry == 0)
        sorry ("-mno-fentry isn%'t compatible with SEH");
      flag_fentry = 1;
    }
  else if (flag_fentry < 0)
    {
#if defined(PROFILE_BEFORE_PROLOGUE)
      flag_fentry = 1;
#else
      flag_fentry = 0;
#endif
    }
  /* When not optimizing for size, enable vzeroupper optimization for
     TARGET_AVX with -fexpensive-optimizations and split 32-byte
     AVX unaligned load/store.  */
  if (TARGET_AVX)
    {
      if (!optimize_size)
        {
          if (flag_expensive_optimizations
              && !(target_flags_explicit & MASK_VZEROUPPER))
            target_flags |= MASK_VZEROUPPER;
          if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
              && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
            target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
          if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
              && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
            target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
          /* Enable 128-bit AVX instruction generation
             for the auto-vectorizer.  */
          if (TARGET_AVX128_OPTIMAL
              && !(target_flags_explicit & MASK_PREFER_AVX128))
            target_flags |= MASK_PREFER_AVX128;
        }
    }
  else
    {
      /* Disable vzeroupper pass if TARGET_AVX is disabled.  */
      target_flags &= ~MASK_VZEROUPPER;
    }
  if (ix86_recip_name)
    {
      char *p = ASTRDUP (ix86_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (!strcmp (q, "default"))
            mask = RECIP_MASK_ALL;
          else
            {
              for (i = 0; i < ARRAY_SIZE (recip_options); i++)
                if (!strcmp (q, recip_options[i].string))
                  {
                    mask = recip_options[i].mask;
                    break;
                  }

              if (i == ARRAY_SIZE (recip_options))
                {
                  error ("unknown option for -mrecip=%s", q);
                  invert = false;
                  mask = RECIP_MASK_NONE;
                }
            }

          recip_mask_explicit |= mask;
          if (invert)
            recip_mask &= ~mask;
          else
            recip_mask |= mask;
        }
    }
  if (TARGET_RECIP)
    recip_mask |= RECIP_MASK_ALL & ~recip_mask_explicit;
  else if (target_flags_explicit & MASK_RECIP)
    recip_mask &= ~(RECIP_MASK_ALL & ~recip_mask_explicit);
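  /* Example of the syntax parsed above: "-mrecip=all,!sqrt" enables every
     reciprocal approximation except the scalar square-root variant, and
     plain "-mrecip" behaves like "-mrecip=all".  */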
  /* Default long double to 64-bit for Bionic.  */
  if (TARGET_HAS_BIONIC
      && !(target_flags_explicit & MASK_LONG_DOUBLE_64))
    target_flags |= MASK_LONG_DOUBLE_64;
  /* Save the initial options in case the user does function specific
     options.  */
  if (main_args_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node ();
}
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
ix86_option_override (void)
{
  static struct register_pass_info insert_vzeroupper_info
    = { &pass_insert_vzeroupper.pass, "reload",
        1, PASS_POS_INSERT_AFTER
      };

  ix86_option_override_internal (true);

  /* This needs to be done at start up.  It's convenient to do it here.  */
  register_pass (&insert_vzeroupper_info);
}
/* Update register usage after having seen the compiler flags.  */

void
ix86_conditional_register_usage (void)
{
  int i, c_mask;
  unsigned int j;

  /* The PIC register, if it exists, is fixed.  */
  j = PIC_OFFSET_TABLE_REGNUM;
  if (j != INVALID_REGNUM)
    fixed_regs[j] = call_used_regs[j] = 1;

  /* For 32-bit targets, squash the REX registers.  */
  if (! TARGET_64BIT)
    {
      for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
        fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
      for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
        fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
    }

  /* See the definition of CALL_USED_REGISTERS in i386.h.  */
  c_mask = (TARGET_64BIT_MS_ABI ? (1 << 3)
            : TARGET_64BIT ? (1 << 2)
            : (1 << 1));
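  /* Entries in CALL_USED_REGISTERS with a value greater than 1 encode a
     per-ABI bitmask; c_mask selects the bit for the ABI in effect (MS
     64-bit, SysV 64-bit, or 32-bit), so the test below reduces each such
     entry to a plain 0/1.  */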
  CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Set/reset conditionally defined registers from
         CALL_USED_REGISTERS initializer.  */
      if (call_used_regs[i] > 1)
        call_used_regs[i] = !!(call_used_regs[i] & c_mask);

      /* Calculate registers of CLOBBERED_REGS register set
         as call used registers from GENERAL_REGS register set.  */
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
          && call_used_regs[i])
        SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
    }

  /* If MMX is disabled, squash the registers.  */
  if (! TARGET_MMX)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
        fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";

  /* If SSE is disabled, squash the registers.  */
  if (! TARGET_SSE)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
        fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";

  /* If the FPU is disabled, squash the registers.  */
  if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
        fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
}
/* Save the current options */

static void
ix86_function_specific_save (struct cl_target_option *ptr)
{
  ptr->arch = ix86_arch;
  ptr->schedule = ix86_schedule;
  ptr->tune = ix86_tune;
  ptr->branch_cost = ix86_branch_cost;
  ptr->tune_defaulted = ix86_tune_defaulted;
  ptr->arch_specified = ix86_arch_specified;
  ptr->x_ix86_isa_flags_explicit = ix86_isa_flags_explicit;
  ptr->ix86_target_flags_explicit = target_flags_explicit;
  ptr->x_recip_mask_explicit = recip_mask_explicit;

  /* The fields are char but the variables are not; make sure the
     values fit in the fields.  */
  gcc_assert (ptr->arch == ix86_arch);
  gcc_assert (ptr->schedule == ix86_schedule);
  gcc_assert (ptr->tune == ix86_tune);
  gcc_assert (ptr->branch_cost == ix86_branch_cost);
}
/* Restore the current options */

static void
ix86_function_specific_restore (struct cl_target_option *ptr)
{
  enum processor_type old_tune = ix86_tune;
  enum processor_type old_arch = ix86_arch;
  unsigned int ix86_arch_mask, ix86_tune_mask;
  int i;

  ix86_arch = (enum processor_type) ptr->arch;
  ix86_schedule = (enum attr_cpu) ptr->schedule;
  ix86_tune = (enum processor_type) ptr->tune;
  ix86_branch_cost = ptr->branch_cost;
  ix86_tune_defaulted = ptr->tune_defaulted;
  ix86_arch_specified = ptr->arch_specified;
  ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
  target_flags_explicit = ptr->ix86_target_flags_explicit;
  recip_mask_explicit = ptr->x_recip_mask_explicit;

  /* Recreate the arch feature tests if the arch changed */
  if (old_arch != ix86_arch)
    {
      ix86_arch_mask = 1u << ix86_arch;
      for (i = 0; i < X86_ARCH_LAST; ++i)
        ix86_arch_features[i]
          = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
    }

  /* Recreate the tune optimization tests */
  if (old_tune != ix86_tune)
    {
      ix86_tune_mask = 1u << ix86_tune;
      for (i = 0; i < X86_TUNE_LAST; ++i)
        ix86_tune_features[i]
          = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
    }
}
/* Print the current options */

static void
ix86_function_specific_print (FILE *file, int indent,
                              struct cl_target_option *ptr)
{
  char *target_string
    = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
                          NULL, NULL, ptr->x_ix86_fpmath, false);

  fprintf (file, "%*sarch = %d (%s)\n",
           indent, "",
           ptr->arch,
           ((ptr->arch < TARGET_CPU_DEFAULT_max)
            ? cpu_names[ptr->arch]
            : "<unknown>"));

  fprintf (file, "%*stune = %d (%s)\n",
           indent, "",
           ptr->tune,
           ((ptr->tune < TARGET_CPU_DEFAULT_max)
            ? cpu_names[ptr->tune]
            : "<unknown>"));

  fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);

  if (target_string)
    {
      fprintf (file, "%*s%s\n", indent, "", target_string);
      free (target_string);
    }
}
/* Inner function to process the attribute((target(...))), take an argument
   and set the current options from the argument.  If we have a list,
   recursively go over the list.  */

static bool
ix86_valid_target_attribute_inner_p (tree args, char *p_strings[],
                                     struct gcc_options *enum_opts_set)
{
  char *next_optstr;
  bool ret = true;

#define IX86_ATTR_ISA(S,O)   { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
#define IX86_ATTR_STR(S,O)   { S, sizeof (S)-1, ix86_opt_str, O, 0 }
#define IX86_ATTR_ENUM(S,O)  { S, sizeof (S)-1, ix86_opt_enum, O, 0 }
#define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
#define IX86_ATTR_NO(S,O,M)  { S, sizeof (S)-1, ix86_opt_no,  O, M }
  enum ix86_opt_type
  {
    ix86_opt_unknown,
    ix86_opt_yes,
    ix86_opt_no,
    ix86_opt_str,
    ix86_opt_enum,
    ix86_opt_isa
  };

  static const struct
  {
    const char *string;          /* option string */
    size_t len;                  /* length of option string */
    enum ix86_opt_type type;     /* type of option */
    int opt;                     /* option number */
    int mask;                    /* mask, if this is a flag option */
  } attrs[] = {
    /* isa options */
    IX86_ATTR_ISA ("3dnow",    OPT_m3dnow),
    IX86_ATTR_ISA ("abm",      OPT_mabm),
    IX86_ATTR_ISA ("bmi",      OPT_mbmi),
    IX86_ATTR_ISA ("bmi2",     OPT_mbmi2),
    IX86_ATTR_ISA ("lzcnt",    OPT_mlzcnt),
    IX86_ATTR_ISA ("tbm",      OPT_mtbm),
    IX86_ATTR_ISA ("aes",      OPT_maes),
    IX86_ATTR_ISA ("avx",      OPT_mavx),
    IX86_ATTR_ISA ("avx2",     OPT_mavx2),
    IX86_ATTR_ISA ("mmx",      OPT_mmmx),
    IX86_ATTR_ISA ("pclmul",   OPT_mpclmul),
    IX86_ATTR_ISA ("popcnt",   OPT_mpopcnt),
    IX86_ATTR_ISA ("sse",      OPT_msse),
    IX86_ATTR_ISA ("sse2",     OPT_msse2),
    IX86_ATTR_ISA ("sse3",     OPT_msse3),
    IX86_ATTR_ISA ("sse4",     OPT_msse4),
    IX86_ATTR_ISA ("sse4.1",   OPT_msse4_1),
    IX86_ATTR_ISA ("sse4.2",   OPT_msse4_2),
    IX86_ATTR_ISA ("sse4a",    OPT_msse4a),
    IX86_ATTR_ISA ("ssse3",    OPT_mssse3),
    IX86_ATTR_ISA ("fma4",     OPT_mfma4),
    IX86_ATTR_ISA ("fma",      OPT_mfma),
    IX86_ATTR_ISA ("xop",      OPT_mxop),
    IX86_ATTR_ISA ("lwp",      OPT_mlwp),
    IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
    IX86_ATTR_ISA ("rdrnd",    OPT_mrdrnd),
    IX86_ATTR_ISA ("f16c",     OPT_mf16c),
    IX86_ATTR_ISA ("rtm",      OPT_mrtm),
    IX86_ATTR_ISA ("hle",      OPT_mhle),
    IX86_ATTR_ISA ("prfchw",   OPT_mprfchw),
    IX86_ATTR_ISA ("rdseed",   OPT_mrdseed),
    IX86_ATTR_ISA ("adx",      OPT_madx),
    IX86_ATTR_ISA ("fxsr",     OPT_mfxsr),
    IX86_ATTR_ISA ("xsave",    OPT_mxsave),
    IX86_ATTR_ISA ("xsaveopt", OPT_mxsaveopt),

    /* enum options */
    IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),

    /* string options */
    IX86_ATTR_STR ("arch=",    IX86_FUNCTION_SPECIFIC_ARCH),
    IX86_ATTR_STR ("tune=",    IX86_FUNCTION_SPECIFIC_TUNE),

    /* flag options */
    IX86_ATTR_YES ("cld",
                   OPT_mcld,
                   MASK_CLD),

    IX86_ATTR_NO ("fancy-math-387",
                  OPT_mfancy_math_387,
                  MASK_NO_FANCY_MATH_387),

    IX86_ATTR_YES ("ieee-fp",
                   OPT_mieee_fp,
                   MASK_IEEE_FP),

    IX86_ATTR_YES ("inline-all-stringops",
                   OPT_minline_all_stringops,
                   MASK_INLINE_ALL_STRINGOPS),

    IX86_ATTR_YES ("inline-stringops-dynamically",
                   OPT_minline_stringops_dynamically,
                   MASK_INLINE_STRINGOPS_DYNAMICALLY),

    IX86_ATTR_NO ("align-stringops",
                  OPT_mno_align_stringops,
                  MASK_NO_ALIGN_STRINGOPS),

    IX86_ATTR_YES ("recip",
                   OPT_mrecip,
                   MASK_RECIP),
  };
  /* If this is a list, recurse to get the options.  */
  if (TREE_CODE (args) == TREE_LIST)
    {
      bool ret = true;

      for (; args; args = TREE_CHAIN (args))
        if (TREE_VALUE (args)
            && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args),
                                                     p_strings, enum_opts_set))
          ret = false;

      return ret;
    }

  else if (TREE_CODE (args) != STRING_CST)
    {
      error ("attribute %<target%> argument not a string");
      return false;
    }
  /* Handle multiple arguments separated by commas.  */
  next_optstr = ASTRDUP (TREE_STRING_POINTER (args));

  while (next_optstr && *next_optstr != '\0')
    {
      char *p = next_optstr;
      char *orig_p = p;
      char *comma = strchr (next_optstr, ',');
      const char *opt_string;
      size_t len, opt_len;
      int opt;
      bool opt_set_p;
      char ch;
      unsigned i;
      enum ix86_opt_type type = ix86_opt_unknown;
      int mask = 0;

      if (comma)
        {
          *comma = '\0';
          len = comma - next_optstr;
          next_optstr = comma + 1;
        }
      else
        {
          len = strlen (p);
          next_optstr = NULL;
        }

      /* Recognize no-xxx.  */
      if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
        {
          opt_set_p = false;
          p += 3;
          len -= 3;
        }
      else
        opt_set_p = true;

      /* Find the option.  */
      ch = *p;
      opt = N_OPTS;
      for (i = 0; i < ARRAY_SIZE (attrs); i++)
        {
          type = attrs[i].type;
          opt_len = attrs[i].len;
          if (ch == attrs[i].string[0]
              && ((type != ix86_opt_str && type != ix86_opt_enum)
                  ? len == opt_len
                  : len > opt_len)
              && memcmp (p, attrs[i].string, opt_len) == 0)
            {
              opt = attrs[i].opt;
              mask = attrs[i].mask;
              opt_string = attrs[i].string;
              break;
            }
        }
      /* Process the option.  */
      if (opt == N_OPTS)
        {
          error ("attribute(target(\"%s\")) is unknown", orig_p);
          ret = false;
        }

      else if (type == ix86_opt_isa)
        {
          struct cl_decoded_option decoded;

          generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
          ix86_handle_option (&global_options, &global_options_set,
                              &decoded, input_location);
        }

      else if (type == ix86_opt_yes || type == ix86_opt_no)
        {
          if (type == ix86_opt_no)
            opt_set_p = !opt_set_p;

          if (opt_set_p)
            target_flags |= mask;
          else
            target_flags &= ~mask;
        }

      else if (type == ix86_opt_str)
        {
          if (p_strings[opt])
            {
              error ("option(\"%s\") was already specified", opt_string);
              ret = false;
            }
          else
            p_strings[opt] = xstrdup (p + opt_len);
        }

      else if (type == ix86_opt_enum)
        {
          bool arg_ok;
          int value;

          arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
          if (arg_ok)
            set_option (&global_options, enum_opts_set, opt, value,
                        p + opt_len, DK_UNSPECIFIED, input_location,
                        global_dc);
          else
            {
              error ("attribute(target(\"%s\")) is unknown", orig_p);
              ret = false;
            }
        }

      else
        gcc_unreachable ();
    }

  return ret;
}
/* Return a TARGET_OPTION_NODE tree of the target options listed or NULL.  */

tree
ix86_valid_target_attribute_tree (tree args)
{
  const char *orig_arch_string = ix86_arch_string;
  const char *orig_tune_string = ix86_tune_string;
  enum fpmath_unit orig_fpmath_set = global_options_set.x_ix86_fpmath;
  int orig_tune_defaulted = ix86_tune_defaulted;
  int orig_arch_specified = ix86_arch_specified;
  char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL };
  tree t = NULL_TREE;
  int i;
  struct cl_target_option *def
    = TREE_TARGET_OPTION (target_option_default_node);
  struct gcc_options enum_opts_set;

  memset (&enum_opts_set, 0, sizeof (enum_opts_set));

  /* Process each of the options on the chain.  */
  if (! ix86_valid_target_attribute_inner_p (args, option_strings,
                                             &enum_opts_set))
    return error_mark_node;
  /* If the changed options are different from the default, rerun
     ix86_option_override_internal, and then save the options away.
     The string options are attribute options, and will be undone
     when we copy the save structure.  */
  if (ix86_isa_flags != def->x_ix86_isa_flags
      || target_flags != def->x_target_flags
      || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
      || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
      || enum_opts_set.x_ix86_fpmath)
    {
      /* If we are using the default tune= or arch=, undo the string assigned,
         and use the default.  */
      if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
        ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
      else if (!orig_arch_specified)
        ix86_arch_string = NULL;

      if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
        ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
      else if (orig_tune_defaulted)
        ix86_tune_string = NULL;

      /* If fpmath= is not set, and we now have sse2 on 32-bit, use it.  */
      if (enum_opts_set.x_ix86_fpmath)
        global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
      else if (!TARGET_64BIT && TARGET_SSE)
        {
          ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
          global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
        }

      /* Do any overrides, such as arch=xxx, or tune=xxx support.  */
      ix86_option_override_internal (false);

      /* Add any builtin functions with the new isa if any.  */
      ix86_add_new_builtins (ix86_isa_flags);

      /* Save the current options unless we are validating options for
         #pragma.  */
      t = build_target_option_node ();

      ix86_arch_string = orig_arch_string;
      ix86_tune_string = orig_tune_string;
      global_options_set.x_ix86_fpmath = orig_fpmath_set;

      /* Free up memory allocated to hold the strings */
      for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
        free (option_strings[i]);
    }

  return t;
}
/* Hook to validate attribute((target("string"))).  */

static bool
ix86_valid_target_attribute_p (tree fndecl,
                               tree ARG_UNUSED (name),
                               tree args,
                               int ARG_UNUSED (flags))
{
  struct cl_target_option cur_target;
  bool ret = true;

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;

  tree old_optimize = build_optimization_node ();
  tree new_target, new_optimize;
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  new_target = ix86_valid_target_attribute_tree (args);
  new_optimize = build_optimization_node ();

  if (new_target == error_mark_node)
    ret = false;

  else if (fndecl && new_target)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to determine if one function can safely inline another.  */

static bool
ix86_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's isa options should be a subset of the caller's, i.e. a SSE4
         function can inline a SSE2 function but a SSE2 function can't inline
         a SSE4 function.  */
      if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
          != callee_opts->x_ix86_isa_flags)
        ret = false;

      /* See if we have the same non-isa options.  */
      else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
        ret = false;

      /* See if arch, tune, etc. are the same.  */
      else if (caller_opts->arch != callee_opts->arch)
        ret = false;

      else if (caller_opts->tune != callee_opts->tune)
        ret = false;

      else if (caller_opts->x_ix86_fpmath != callee_opts->x_ix86_fpmath)
        ret = false;

      else if (caller_opts->branch_cost != callee_opts->branch_cost)
        ret = false;

      else
        ret = true;
    }

  return ret;
}
/* Remember the last target of ix86_set_current_function.  */
static GTY(()) tree ix86_previous_fndecl;

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
ix86_set_current_function (tree fndecl)
{
  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl && fndecl != ix86_previous_fndecl)
    {
      tree old_tree = (ix86_previous_fndecl
                       ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
                       : NULL_TREE);

      tree new_tree = (fndecl
                       ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
                       : NULL_TREE);

      ix86_previous_fndecl = fndecl;
      if (old_tree == new_tree)
        ;

      else if (new_tree)
        {
          cl_target_option_restore (&global_options,
                                    TREE_TARGET_OPTION (new_tree));
          target_reinit ();
        }

      else if (old_tree)
        {
          struct cl_target_option *def
            = TREE_TARGET_OPTION (target_option_current_node);

          cl_target_option_restore (&global_options, def);
          target_reinit ();
        }
    }
}
/* Return true if this goes in large data/bss.  */

static bool
ix86_in_large_data_p (tree exp)
{
  if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
    return false;

  /* Functions are never large data.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".ldata") == 0
          || strcmp (section, ".lbss") == 0)
        return true;
      return false;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in data because it might be too big when completed.  */
      if (!size || size > ix86_section_threshold)
        return true;
    }

  return false;
}
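/* ix86_section_threshold holds the -mlarge-data-threshold value (64K by
   default), so with the medium code model only objects larger than that,
   plus incomplete types, are routed to the .ldata/.lbss sections below.  */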
/* Switch to the appropriate section for output of DECL.
   DECL is either a `VAR_DECL' node or a constant of some sort.
   RELOC indicates whether forming the initial value of DECL requires
   link-time relocations.  */

static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
                                                ATTRIBUTE_UNUSED;

static section *
x86_64_elf_select_section (tree decl, int reloc,
                           unsigned HOST_WIDE_INT align)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && ix86_in_large_data_p (decl))
    {
      const char *sname = NULL;
      unsigned int flags = SECTION_WRITE;
      switch (categorize_decl_for_section (decl, reloc))
        {
        case SECCAT_DATA:
          sname = ".ldata";
          break;
        case SECCAT_DATA_REL:
          sname = ".ldata.rel";
          break;
        case SECCAT_DATA_REL_LOCAL:
          sname = ".ldata.rel.local";
          break;
        case SECCAT_DATA_REL_RO:
          sname = ".ldata.rel.ro";
          break;
        case SECCAT_DATA_REL_RO_LOCAL:
          sname = ".ldata.rel.ro.local";
          break;
        case SECCAT_BSS:
          sname = ".lbss";
          flags |= SECTION_BSS;
          break;
        case SECCAT_RODATA:
        case SECCAT_RODATA_MERGE_STR:
        case SECCAT_RODATA_MERGE_STR_INIT:
        case SECCAT_RODATA_MERGE_CONST:
          sname = ".lrodata";
          flags = 0;
          break;
        case SECCAT_SRODATA:
        case SECCAT_SDATA:
        case SECCAT_SBSS:
          gcc_unreachable ();
        case SECCAT_TEXT:
        case SECCAT_TDATA:
        case SECCAT_TBSS:
          /* We don't split these for medium model.  Place them into
             default sections and hope for best.  */
          break;
        }
      if (sname)
        {
          /* We might get called with string constants, but get_named_section
             doesn't like them as they are not DECLs.  Also, we need to set
             flags in that case.  */
          if (!DECL_P (decl))
            return get_section (sname, flags, NULL);
          return get_named_section (decl, sname, reloc);
        }
    }
  return default_elf_select_section (decl, reloc, align);
}
/* Build up a unique section name, expressed as a
   STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
   RELOC indicates whether the initial value of EXP requires
   link-time relocations.  */

static void ATTRIBUTE_UNUSED
x86_64_elf_unique_section (tree decl, int reloc)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && ix86_in_large_data_p (decl))
    {
      const char *prefix = NULL;
      /* We only need to use .gnu.linkonce if we don't have COMDAT groups.  */
      bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;

      switch (categorize_decl_for_section (decl, reloc))
        {
        case SECCAT_DATA:
        case SECCAT_DATA_REL:
        case SECCAT_DATA_REL_LOCAL:
        case SECCAT_DATA_REL_RO:
        case SECCAT_DATA_REL_RO_LOCAL:
          prefix = one_only ? ".ld" : ".ldata";
          break;
        case SECCAT_BSS:
          prefix = one_only ? ".lb" : ".lbss";
          break;
        case SECCAT_RODATA:
        case SECCAT_RODATA_MERGE_STR:
        case SECCAT_RODATA_MERGE_STR_INIT:
        case SECCAT_RODATA_MERGE_CONST:
          prefix = one_only ? ".lr" : ".lrodata";
          break;
        case SECCAT_SRODATA:
        case SECCAT_SDATA:
        case SECCAT_SBSS:
          gcc_unreachable ();
        case SECCAT_TEXT:
        case SECCAT_TDATA:
        case SECCAT_TBSS:
          /* We don't split these for medium model.  Place them into
             default sections and hope for best.  */
          break;
        }
      if (prefix)
        {
          const char *name, *linkonce;
          char *string;

          name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
          name = targetm.strip_name_encoding (name);

          /* If we're using one_only, then there needs to be a .gnu.linkonce
             prefix to the section name.  */
          linkonce = one_only ? ".gnu.linkonce" : "";

          string = ACONCAT ((linkonce, prefix, ".", name, NULL));

          DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
          return;
        }
    }
  default_unique_section (decl, reloc);
}
#ifdef COMMON_ASM_OP
/* This says how to output assembler code to declare an
   uninitialized external linkage data object.

   For medium model x86-64 we need to use .largecomm opcode for
   large objects.  */
void
x86_elf_aligned_common (FILE *file,
                        const char *name, unsigned HOST_WIDE_INT size,
                        int align)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && size > (unsigned int)ix86_section_threshold)
    fputs (".largecomm\t", file);
  else
    fputs (COMMON_ASM_OP, file);
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
           size, align / BITS_PER_UNIT);
}
#endif
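/* Illustrative output: for a large uninitialized object this emits, e.g.,
     .largecomm  big_array,1048576,32
   (name, size in bytes, alignment in bytes) instead of the usual
   .comm directive.  */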
/* Utility function for targets to use in implementing
   ASM_OUTPUT_ALIGNED_BSS.  */

void
x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
                        const char *name, unsigned HOST_WIDE_INT size,
                        int align)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && size > (unsigned int)ix86_section_threshold)
    switch_to_section (get_named_section (decl, ".lbss", 0));
  else
    switch_to_section (bss_section);
  ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
#ifdef ASM_DECLARE_OBJECT_NAME
  last_assemble_variable_decl = decl;
  ASM_DECLARE_OBJECT_NAME (file, name, decl);
#else
  /* Standard thing is just output label for the object.  */
  ASM_OUTPUT_LABEL (file, name);
#endif /* ASM_DECLARE_OBJECT_NAME */
  ASM_OUTPUT_SKIP (file, size ? size : 1);
}
/* Decide whether we must probe the stack before any space allocation
   on this target.  It's essentially TARGET_STACK_PROBE except when
   -fstack-check causes the stack to be already probed differently.  */

bool
ix86_target_stack_probe (void)
{
  /* Do not probe the stack twice if static stack checking is enabled.  */
  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    return false;

  return TARGET_STACK_PROBE;
}
/* Decide whether we can make a sibling call to a function.  DECL is the
   declaration of the function being targeted by the call and EXP is the
   CALL_EXPR representing the call.  */

static bool
ix86_function_ok_for_sibcall (tree decl, tree exp)
{
  tree type, decl_or_type;
  rtx a, b;

  /* If we are generating position-independent code, we cannot sibcall
     optimize any indirect call, or a direct call to a global function,
     as the PLT requires %ebx be live.  (Darwin does not have a PLT.)  */
  if (!TARGET_MACHO
      && !TARGET_64BIT
      && flag_pic
      && (!decl || !targetm.binds_local_p (decl)))
    return false;

  /* If we need to align the outgoing stack, then sibcalling would
     unalign the stack, which may break the called function.  */
  if (ix86_minimum_incoming_stack_boundary (true)
      < PREFERRED_STACK_BOUNDARY)
    return false;

  if (decl)
    {
      decl_or_type = decl;
      type = TREE_TYPE (decl);
    }
  else
    {
      /* We're looking at the CALL_EXPR, we need the type of the function.  */
      type = CALL_EXPR_FN (exp);                /* pointer expression */
      type = TREE_TYPE (type);                  /* pointer type */
      type = TREE_TYPE (type);                  /* function type */
      decl_or_type = type;
    }

  /* Check that the return value locations are the same.  For example, if
     we are returning floats on the 80387 register stack, we cannot
     make a sibcall from a function that doesn't return a float to a
     function that does or, conversely, from a function that does return
     a float to a function that doesn't; the necessary stack adjustment
     would not be executed.  This is also the place we notice
     differences in the return value ABI.  Note that it is ok for one
     of the functions to have void return type as long as the return
     value of the other is passed in a register.  */
  a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
  b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
                           cfun->decl, false);
  if (STACK_REG_P (a) || STACK_REG_P (b))
    {
      if (!rtx_equal_p (a, b))
        return false;
    }
  else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    ;
  else if (!rtx_equal_p (a, b))
    return false;

  if (TARGET_64BIT)
    {
      /* The SYSV ABI has more call-clobbered registers;
         disallow sibcalls from MS to SYSV.  */
      if (cfun->machine->call_abi == MS_ABI
          && ix86_function_type_abi (type) == SYSV_ABI)
        return false;
    }
  else
    {
      /* If this call is indirect, we'll need to be able to use a
         call-clobbered register for the address of the target function.
         Make sure that all such registers are not used for passing
         parameters.  Note that DLLIMPORT functions are indirect.  */
      if (!decl
          || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
        {
          if (ix86_function_regparm (type, NULL) >= 3)
            {
              /* ??? Need to count the actual number of registers to be used,
                 not the possible number of registers.  Fix later.  */
              return false;
            }
        }
    }

  /* Otherwise okay.  That also includes certain types of indirect calls.  */
  return true;
}
/* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
   and "sseregparm" calling convention attributes;
   arguments as in struct attribute_spec.handler.  */

static tree
ix86_handle_cconv_attribute (tree *node, tree name,
                             tree args,
                             int flags ATTRIBUTE_UNUSED,
                             bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  /* Can combine regparm with all attributes but fastcall, and thiscall.  */
  if (is_attribute_p ("regparm", name))
    {
      tree cst;

      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
        error ("fastcall and regparm attributes are not compatible");

      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
        error ("regparm and thiscall attributes are not compatible");

      cst = TREE_VALUE (args);
      if (TREE_CODE (cst) != INTEGER_CST)
        {
          warning (OPT_Wattributes,
                   "%qE attribute requires an integer constant argument",
                   name);
          *no_add_attrs = true;
        }
      else if (compare_tree_int (cst, REGPARM_MAX) > 0)
        {
          warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
                   name, REGPARM_MAX);
          *no_add_attrs = true;
        }

      return NULL_TREE;
    }

  if (TARGET_64BIT)
    {
      /* Do not warn when emulating the MS ABI.  */
      if ((TREE_CODE (*node) != FUNCTION_TYPE
           && TREE_CODE (*node) != METHOD_TYPE)
          || ix86_function_type_abi (*node) != MS_ABI)
        warning (OPT_Wattributes, "%qE attribute ignored",
                 name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  /* Can combine fastcall with stdcall (redundant) and sseregparm.  */
  if (is_attribute_p ("fastcall", name))
    {
      if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
        error ("fastcall and cdecl attributes are not compatible");
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
        error ("fastcall and stdcall attributes are not compatible");
      if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
        error ("fastcall and regparm attributes are not compatible");
      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
        error ("fastcall and thiscall attributes are not compatible");
    }

  /* Can combine stdcall with fastcall (redundant), regparm and
     sseregparm.  */
  else if (is_attribute_p ("stdcall", name))
    {
      if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
        error ("stdcall and cdecl attributes are not compatible");
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
        error ("stdcall and fastcall attributes are not compatible");
      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
        error ("stdcall and thiscall attributes are not compatible");
    }

  /* Can combine cdecl with regparm and sseregparm.  */
  else if (is_attribute_p ("cdecl", name))
    {
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
        error ("stdcall and cdecl attributes are not compatible");
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
        error ("fastcall and cdecl attributes are not compatible");
      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
        error ("cdecl and thiscall attributes are not compatible");
    }
  else if (is_attribute_p ("thiscall", name))
    {
      if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
        warning (OPT_Wattributes, "%qE attribute is used for non-class method",
                 name);
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
        error ("stdcall and thiscall attributes are not compatible");
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
        error ("fastcall and thiscall attributes are not compatible");
      if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
        error ("cdecl and thiscall attributes are not compatible");
    }

  /* Can combine sseregparm with all attributes.  */

  return NULL_TREE;
}
/* The transactional memory builtins are implicitly regparm or fastcall
   depending on the ABI.  Override the generic do-nothing attribute that
   these builtins were declared with, and replace it with one of the two
   attributes that we expect elsewhere.  */

static tree
ix86_handle_tm_regparm_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  tree alt;

  /* In no case do we want to add the placeholder attribute.  */
  *no_add_attrs = true;

  /* The 64-bit ABI is unchanged for transactional memory.  */
  if (TARGET_64BIT)
    return NULL_TREE;

  /* ??? Is there a better way to validate 32-bit windows?  We have
     cfun->machine->call_abi, but that seems to be set only for 64-bit.  */
  if (CHECK_STACK_LIMIT > 0)
    alt = tree_cons (get_identifier ("fastcall"), NULL, NULL);
  else
    {
      alt = tree_cons (NULL, build_int_cst (NULL, 2), NULL);
      alt = tree_cons (get_identifier ("regparm"), alt, NULL);
    }
  decl_attributes (node, alt, flags);

  return NULL_TREE;
}
/* This function determines from TYPE the calling-convention.  */

unsigned int
ix86_get_callcvt (const_tree type)
{
  unsigned int ret = 0;
  bool is_stdarg;
  tree attrs;

  if (TARGET_64BIT)
    return IX86_CALLCVT_CDECL;

  attrs = TYPE_ATTRIBUTES (type);
  if (attrs != NULL_TREE)
    {
      if (lookup_attribute ("cdecl", attrs))
        ret |= IX86_CALLCVT_CDECL;
      else if (lookup_attribute ("stdcall", attrs))
        ret |= IX86_CALLCVT_STDCALL;
      else if (lookup_attribute ("fastcall", attrs))
        ret |= IX86_CALLCVT_FASTCALL;
      else if (lookup_attribute ("thiscall", attrs))
        ret |= IX86_CALLCVT_THISCALL;

      /* Regparm isn't allowed for thiscall and fastcall.  */
      if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
        {
          if (lookup_attribute ("regparm", attrs))
            ret |= IX86_CALLCVT_REGPARM;
          if (lookup_attribute ("sseregparm", attrs))
            ret |= IX86_CALLCVT_SSEREGPARM;
        }

      if (IX86_BASE_CALLCVT(ret) != 0)
        return ret;
    }

  is_stdarg = stdarg_p (type);
  if (TARGET_RTD && !is_stdarg)
    return IX86_CALLCVT_STDCALL | ret;

  if (ret != 0
      || is_stdarg
      || TREE_CODE (type) != METHOD_TYPE
      || ix86_function_type_abi (type) != MS_ABI)
    return IX86_CALLCVT_CDECL | ret;

  return IX86_CALLCVT_THISCALL;
}
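/* A hedged example of how the bits combine (illustrative, not GCC source):
   for "void __attribute__((regparm(3))) f (int);" on 32-bit this returns
   IX86_CALLCVT_CDECL | IX86_CALLCVT_REGPARM, since regparm alone supplies
   no base convention; "void __attribute__((fastcall)) g (int);" yields
   just IX86_CALLCVT_FASTCALL, because regparm/sseregparm are ignored for
   fastcall and thiscall above.  */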
/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */

static int
ix86_comp_type_attributes (const_tree type1, const_tree type2)
{
  unsigned int ccvt1, ccvt2;

  if (TREE_CODE (type1) != FUNCTION_TYPE
      && TREE_CODE (type1) != METHOD_TYPE)
    return 1;

  ccvt1 = ix86_get_callcvt (type1);
  ccvt2 = ix86_get_callcvt (type2);
  if (ccvt1 != ccvt2)
    return 0;
  if (ix86_function_regparm (type1, NULL)
      != ix86_function_regparm (type2, NULL))
    return 0;

  return 1;
}
/* Return the regparm value for a function with the indicated TYPE and DECL.
   DECL may be NULL when calling function indirectly
   or considering a libcall.  */

static int
ix86_function_regparm (const_tree type, const_tree decl)
{
  tree attr;
  int regparm;
  unsigned int ccvt;

  if (TARGET_64BIT)
    return (ix86_function_type_abi (type) == SYSV_ABI
            ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
  ccvt = ix86_get_callcvt (type);
  regparm = ix86_regparm;

  if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
    {
      attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
      if (attr)
        {
          regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
          return regparm;
        }
    }
  else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
    return 2;
  else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
    return 1;

  /* Use register calling convention for local functions when possible.  */
  if (decl
      && TREE_CODE (decl) == FUNCTION_DECL
      && optimize
      && !(profile_flag && !flag_fentry))
    {
      /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified.  */
      struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
      if (i && i->local && i->can_change_signature)
        {
          int local_regparm, globals = 0, regno;

          /* Make sure no regparm register is taken by a
             fixed register variable.  */
          for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
            if (fixed_regs[local_regparm])
              break;

          /* We don't want to use regparm(3) for nested functions as
             these use a static chain pointer in the third argument.  */
          if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
            local_regparm = 2;

          /* In 32-bit mode save a register for the split stack.  */
          if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
            local_regparm = 2;

          /* Each fixed register usage increases register pressure,
             so fewer registers should be used for argument passing.
             This functionality can be overridden by an explicit
             regparm value.  */
          for (regno = AX_REG; regno <= DI_REG; regno++)
            if (fixed_regs[regno])
              globals++;

          local_regparm
            = globals < local_regparm ? local_regparm - globals : 0;

          if (local_regparm > regparm)
            regparm = local_regparm;
        }
    }

  return regparm;
}
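/* Worked example (a sketch): with -O2 on 32-bit and a static function whose
   address never escapes, cgraph marks it local; if no fixed register
   variable claims %eax/%edx/%ecx, local_regparm reaches REGPARM_MAX (3)
   and the function receives its first three integer arguments in
   registers even without an explicit regparm attribute.  */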
/* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
   DFmode (2) arguments in SSE registers for a function with the
   indicated TYPE and DECL.  DECL may be NULL when calling function
   indirectly or considering a libcall.  Otherwise return 0.  */

static int
ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
{
  gcc_assert (!TARGET_64BIT);

  /* Use SSE registers to pass SFmode and DFmode arguments if requested
     by the sseregparm attribute.  */
  if (TARGET_SSEREGPARM
      || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
    {
      if (!TARGET_SSE)
        {
          if (warn)
            {
              if (decl)
                error ("calling %qD with attribute sseregparm without "
                       "SSE/SSE2 enabled", decl);
              else
                error ("calling %qT with attribute sseregparm without "
                       "SSE/SSE2 enabled", type);
            }
          return 0;
        }

      return 2;
    }

  /* For local functions, pass up to SSE_REGPARM_MAX SFmode
     (and DFmode for SSE2) arguments in SSE registers.  */
  if (decl && TARGET_SSE_MATH && optimize
      && !(profile_flag && !flag_fentry))
    {
      /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified.  */
      struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
      if (i && i->local && i->can_change_signature)
        return TARGET_SSE2 ? 2 : 1;
    }

  return 0;
}
/* Return true if EAX is live at the start of the function.  Used by
   ix86_expand_prologue to determine if we need special help before
   calling allocate_stack_worker.  */

static bool
ix86_eax_live_at_start_p (void)
{
  /* Cheat.  Don't bother working forward from ix86_function_regparm
     to the function type to whether an actual argument is located in
     eax.  Instead just look at cfg info, which is still close enough
     to correct at this point.  This gives false positives for broken
     functions that might use uninitialized data that happens to be
     allocated in eax, but who cares?  */
  return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
}
static bool
ix86_keep_aggregate_return_pointer (tree fntype)
{
  tree attr;

  if (!TARGET_64BIT)
    {
      attr = lookup_attribute ("callee_pop_aggregate_return",
                               TYPE_ATTRIBUTES (fntype));
      if (attr)
        return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);

      /* For 32-bit MS-ABI the default is to keep aggregate
         return pointer.  */
      if (ix86_function_type_abi (fntype) == MS_ABI)
        return true;
    }
  return KEEP_AGGREGATE_RETURN_POINTER != 0;
}
/* Value is the number of bytes of arguments automatically
   popped when returning from a subroutine call.
   FUNDECL is the declaration node of the function (as a tree),
   FUNTYPE is the data type of the function (as a tree),
   or for a library call it is an identifier node for the subroutine name.
   SIZE is the number of bytes of arguments passed on the stack.

   On the 80386, the RTD insn may be used to pop them if the number
   of args is fixed, but if the number is variable then the caller
   must pop them all.  RTD can't be used for library calls now
   because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.

   The attribute stdcall is equivalent to RTD on a per module basis.  */

static int
ix86_return_pops_args (tree fundecl, tree funtype, int size)
{
  unsigned int ccvt;

  /* None of the 64-bit ABIs pop arguments.  */
  if (TARGET_64BIT)
    return 0;

  ccvt = ix86_get_callcvt (funtype);

  if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
               | IX86_CALLCVT_THISCALL)) != 0
      && ! stdarg_p (funtype))
    return size;

  /* Lose any fake structure return argument if it is passed on the stack.  */
  if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
      && !ix86_keep_aggregate_return_pointer (funtype))
    {
      int nregs = ix86_function_regparm (funtype, fundecl);
      if (nregs == 0)
        return GET_MODE_SIZE (Pmode);
    }

  return 0;
}
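/* Illustrative example (not GCC source): for
     int __attribute__((stdcall)) f (int a, int b);
   SIZE is 8, the callcvt includes IX86_CALLCVT_STDCALL and f is not
   variadic, so this returns 8 and the callee pops its own arguments
   (a "ret $8" instead of a plain "ret").  */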
/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook.  */

static bool
ix86_legitimate_combined_insn (rtx insn)
{
  /* Check operand constraints in case hard registers were propagated
     into insn pattern.  This check prevents combine pass from
     generating insn patterns with invalid hard register operands.
     These invalid insns can eventually confuse reload to error out
     with a spill failure.  See also PRs 46829 and 46843.  */
  if ((INSN_CODE (insn) = recog (PATTERN (insn), insn, 0)) >= 0)
    {
      int i;

      extract_insn (insn);
      preprocess_constraints ();

      for (i = 0; i < recog_data.n_operands; i++)
        {
          rtx op = recog_data.operand[i];
          enum machine_mode mode = GET_MODE (op);
          struct operand_alternative *op_alt;
          int offset = 0;
          bool win;
          int j;

          /* A unary operator may be accepted by the predicate, but it
             is irrelevant for matching constraints.  */
          if (UNARY_P (op))
            op = XEXP (op, 0);

          if (GET_CODE (op) == SUBREG)
            {
              if (REG_P (SUBREG_REG (op))
                  && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
                offset = subreg_regno_offset (REGNO (SUBREG_REG (op)),
                                              GET_MODE (SUBREG_REG (op)),
                                              SUBREG_BYTE (op),
                                              GET_MODE (op));
              op = SUBREG_REG (op);
            }

          if (!(REG_P (op) && HARD_REGISTER_P (op)))
            continue;

          op_alt = recog_op_alt[i];

          /* Operand has no constraints, anything is OK.  */
          win = !recog_data.n_alternatives;

          for (j = 0; j < recog_data.n_alternatives; j++)
            {
              if (op_alt[j].anything_ok
                  || (op_alt[j].matches != -1
                      && operands_match_p
                           (recog_data.operand[i],
                            recog_data.operand[op_alt[j].matches]))
                  || reg_fits_class_p (op, op_alt[j].cl, offset, mode))
                {
                  win = true;
                  break;
                }
            }

          if (!win)
            return false;
        }
    }

  return true;
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
ix86_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_LP64 ? 44 : 29);
}
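/* For reference (a sketch of the AddressSanitizer mapping, assuming the
   usual shadow scale of 8): a shadow address is computed as

     shadow = (addr >> 3) + ix86_asan_shadow_offset ()

   i.e. an offset of 1<<44 for LP64 and 1<<29 for 32-bit targets.  */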
/* Argument support functions.  */

/* Return true when register may be used to pass function parameters.  */

bool
ix86_function_arg_regno_p (int regno)
{
  int i;
  const int *parm_regs;

  if (!TARGET_64BIT)
    {
      if (TARGET_MACHO)
        return (regno < REGPARM_MAX
                || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
      else
        return (regno < REGPARM_MAX
                || (TARGET_MMX && MMX_REGNO_P (regno)
                    && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
                || (TARGET_SSE && SSE_REGNO_P (regno)
                    && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
    }

  if (TARGET_MACHO)
    {
      if (SSE_REGNO_P (regno) && TARGET_SSE)
        return true;
    }
  else
    {
      if (TARGET_SSE && SSE_REGNO_P (regno)
          && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
        return true;
    }

  /* TODO: The function should depend on current function ABI but
     builtins.c would need updating then.  Therefore we use the
     default ABI.  */

  /* RAX is used as hidden argument to va_arg functions.  */
  if (ix86_abi == SYSV_ABI && regno == AX_REG)
    return true;

  if (ix86_abi == MS_ABI)
    parm_regs = x86_64_ms_abi_int_parameter_registers;
  else
    parm_regs = x86_64_int_parameter_registers;
  for (i = 0; i < (ix86_abi == MS_ABI
                   ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
    if (regno == parm_regs[i])
      return true;
  return false;
}
/* Return true if we do not know how to pass TYPE solely in registers.  */

static bool
ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (must_pass_in_stack_var_size_or_pad (mode, type))
    return true;

  /* For 32-bit, we want TImode aggregates to go on the stack.  But watch out!
     The layout_type routine is crafty and tries to trick us into passing
     currently unsupported vector types on the stack by using TImode.  */
  return (!TARGET_64BIT && mode == TImode
          && type && TREE_CODE (type) != VECTOR_TYPE);
}
/* Return the size, in bytes, of the area reserved for arguments passed
   in registers for the function represented by FNDECL, depending on the
   ABI used.  */
int
ix86_reg_parm_stack_space (const_tree fndecl)
{
  enum calling_abi call_abi = SYSV_ABI;
  if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
    call_abi = ix86_function_abi (fndecl);
  else
    call_abi = ix86_function_type_abi (fndecl);
  if (TARGET_64BIT && call_abi == MS_ABI)
    return 32;
  return 0;
}
/* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
   call abi used.  */
enum calling_abi
ix86_function_type_abi (const_tree fntype)
{
  if (fntype != NULL_TREE && TYPE_ATTRIBUTES (fntype) != NULL_TREE)
    {
      enum calling_abi abi = ix86_abi;
      if (abi == SYSV_ABI)
        {
          if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
            abi = MS_ABI;
        }
      else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
        abi = SYSV_ABI;
      return abi;
    }
  return ix86_abi;
}
static bool
ix86_function_ms_hook_prologue (const_tree fn)
{
  if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
    {
      if (decl_function_context (fn) != NULL_TREE)
        error_at (DECL_SOURCE_LOCATION (fn),
                  "ms_hook_prologue is not compatible with nested function");
      else
        return true;
    }
  return false;
}
static enum calling_abi
ix86_function_abi (const_tree fndecl)
{
  if (fndecl == NULL_TREE)
    return ix86_abi;
  return ix86_function_type_abi (TREE_TYPE (fndecl));
}

/* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
   call abi used.  */
enum calling_abi
ix86_cfun_abi (void)
{
  if (! cfun)
    return ix86_abi;
  return cfun->machine->call_abi;
}
/* Write the extra assembler code needed to declare a function properly.  */

void
ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
                                tree decl)
{
  bool is_ms_hook = ix86_function_ms_hook_prologue (decl);

  if (is_ms_hook)
    {
      int i, filler_count = (TARGET_64BIT ? 32 : 16);
      unsigned int filler_cc = 0xcccccccc;

      for (i = 0; i < filler_count; i += 4)
        fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
    }

#ifdef SUBTARGET_ASM_UNWIND_INIT
  SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
#endif

  ASM_OUTPUT_LABEL (asm_out_file, fname);

  /* Output magic byte marker, if hot-patch attribute is set.  */
  if (is_ms_hook)
    {
      if (TARGET_64BIT)
        {
          /* leaq [%rsp + 0], %rsp  */
          asm_fprintf (asm_out_file, ASM_BYTE
                       "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
        }
      else
        {
          /* movl.s %edi, %edi
             push   %ebp
             movl.s %esp, %ebp  */
          asm_fprintf (asm_out_file, ASM_BYTE
                       "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
        }
    }
}
extern void init_regs (void);

/* Implementation of call abi switching target hook.  Specific to FNDECL
   the specific call register sets are set.  See also
   ix86_conditional_register_usage for more details.  */
void
ix86_call_abi_override (const_tree fndecl)
{
  if (fndecl == NULL_TREE)
    cfun->machine->call_abi = ix86_abi;
  else
    cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
}

/* 64-bit MS and SYSV ABI have different set of call used registers.  Avoid
   expensive re-initialization of init_regs each time we switch function
   context since this is needed only during RTL expansion.  */
static void
ix86_maybe_switch_abi (void)
{
  if (TARGET_64BIT
      && call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
    reinit_regs ();
}
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum,  /* Argument info to initialize */
                      tree fntype,      /* tree ptr for function decl */
                      rtx libname,      /* SYMBOL_REF of library name or 0 */
                      tree fndecl,
                      int caller)
{
  struct cgraph_local_info *i;

  memset (cum, 0, sizeof (*cum));

  if (fndecl)
    {
      i = cgraph_local_info (fndecl);
      cum->call_abi = ix86_function_abi (fndecl);
    }
  else
    {
      i = NULL;
      cum->call_abi = ix86_function_type_abi (fntype);
    }

  cum->caller = caller;

  /* Set up the number of registers to use for passing arguments.  */

  if (TARGET_64BIT && cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
    sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
           "or subtarget optimization implying it");
  cum->nregs = ix86_regparm;
  if (TARGET_64BIT)
    {
      cum->nregs = (cum->call_abi == SYSV_ABI
                    ? X86_64_REGPARM_MAX
                    : X86_64_MS_REGPARM_MAX);
    }
  if (TARGET_SSE)
    {
      cum->sse_nregs = SSE_REGPARM_MAX;
      if (TARGET_64BIT)
        {
          cum->sse_nregs = (cum->call_abi == SYSV_ABI
                            ? X86_64_SSE_REGPARM_MAX
                            : X86_64_MS_SSE_REGPARM_MAX);
        }
    }
  if (TARGET_MMX)
    cum->mmx_nregs = MMX_REGPARM_MAX;
  cum->warn_avx = true;
  cum->warn_sse = true;
  cum->warn_mmx = true;

  /* Because type might mismatch in between caller and callee, we need to
     use actual type of function for local calls.
     FIXME: cgraph_analyze can be told to actually record if function uses
     va_start so for local functions maybe_vaarg can be made aggressive
     minimization.
     FIXME: once the type system is fixed, we won't need this code anymore.  */
  if (i && i->local && i->can_change_signature)
    fntype = TREE_TYPE (fndecl);
  cum->maybe_vaarg = (fntype
                      ? (!prototype_p (fntype) || stdarg_p (fntype))
                      : !libname);

  if (!TARGET_64BIT)
    {
      /* If there are variable arguments, then we won't pass anything
         in registers in 32-bit mode.  */
      if (stdarg_p (fntype))
        {
          cum->nregs = 0;
          cum->sse_nregs = 0;
          cum->mmx_nregs = 0;
          cum->warn_avx = false;
          cum->warn_sse = false;
          cum->warn_mmx = false;
          return;
        }

      /* Use ecx and edx registers if function has fastcall attribute,
         else look for regparm information.  */
      if (fntype)
        {
          unsigned int ccvt = ix86_get_callcvt (fntype);
          if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
            {
              cum->nregs = 1;
              cum->fastcall = 1; /* Same first register as in fastcall.  */
            }
          else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
            {
              cum->nregs = 2;
              cum->fastcall = 1;
            }
          else
            cum->nregs = ix86_function_regparm (fntype, fndecl);
        }

      /* Set up the number of SSE registers used for passing SFmode
         and DFmode arguments.  Warn for mismatching ABI.  */
      cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
    }
}
/* Return the "natural" mode for TYPE.  In most cases, this is just TYPE_MODE.
   But in the case of vector types, it is some vector mode.

   When we have only some of our vector isa extensions enabled, then there
   are some modes for which vector_mode_supported_p is false.  For these
   modes, the generic vector support in gcc will choose some non-vector mode
   in order to implement the type.  By computing the natural mode, we'll
   select the proper ABI location for the operand and not depend on whatever
   the middle-end decides to do with these vector types.

   The middle-end can't deal with vector types > 16 bytes.  In this
   case, we return the original mode and warn ABI change if CUM isn't
   NULL.  */

static enum machine_mode
type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if ((size == 8 || size == 16 || size == 32)
          /* ??? Generic code allows us to create width 1 vectors.  Ignore.  */
          && TYPE_VECTOR_SUBPARTS (type) > 1)
        {
          enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));

          if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
            mode = MIN_MODE_VECTOR_FLOAT;
          else
            mode = MIN_MODE_VECTOR_INT;

          /* Get the mode which has this inner mode and number of units.  */
          for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
            if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
                && GET_MODE_INNER (mode) == innermode)
              {
                if (size == 32 && !TARGET_AVX)
                  {
                    static bool warnedavx;

                    if (cum && !warnedavx && cum->warn_avx)
                      {
                        warnedavx = true;
                        warning (0, "AVX vector argument without AVX "
                                 "enabled changes the ABI");
                      }
                    return TYPE_MODE (type);
                  }
                else if ((size == 8 || size == 16) && !TARGET_SSE)
                  {
                    static bool warnedsse;

                    if (cum && !warnedsse && cum->warn_sse)
                      {
                        warnedsse = true;
                        warning (0, "SSE vector argument without SSE "
                                 "enabled changes the ABI");
                      }
                    return mode;
                  }
                else
                  return mode;
              }

          gcc_unreachable ();
        }
    }

  return mode;
}
/* We want to pass a value in REGNO whose "natural" mode is MODE.  However,
   this may not agree with the mode that the type system has chosen for the
   register, which is ORIG_MODE.  If ORIG_MODE is not BLKmode, then we can
   go ahead and use it.  Otherwise we have to build a PARALLEL instead.  */

static rtx
gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
                     unsigned int regno)
{
  rtx tmp;

  if (orig_mode != BLKmode)
    tmp = gen_rtx_REG (orig_mode, regno);
  else
    {
      tmp = gen_rtx_REG (mode, regno);
      tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
    }

  return tmp;
}
/* x86-64 register passing implementation.  See x86-64 ABI for details.  Goal
   of this code is to classify each 8bytes of incoming argument by the register
   class and assign registers accordingly.  */

/* Return the union class of CLASS1 and CLASS2.
   See the x86-64 PS ABI for details.  */

static enum x86_64_reg_class
merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
{
  /* Rule #1: If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
     the other class.  */
  if (class1 == X86_64_NO_CLASS)
    return class2;
  if (class2 == X86_64_NO_CLASS)
    return class1;

  /* Rule #3: If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #4: If one of the classes is INTEGER, the result is INTEGER.  */
  if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
      || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
    return X86_64_INTEGERSI_CLASS;
  if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
      || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
    return X86_64_INTEGER_CLASS;

  /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
     MEMORY is used.  */
  if (class1 == X86_64_X87_CLASS
      || class1 == X86_64_X87UP_CLASS
      || class1 == X86_64_COMPLEX_X87_CLASS
      || class2 == X86_64_X87_CLASS
      || class2 == X86_64_X87UP_CLASS
      || class2 == X86_64_COMPLEX_X87_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #6: Otherwise class SSE is used.  */
  return X86_64_SSE_CLASS;
}
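/* Worked example (a sketch): in "struct s { int a; float b; };" both
   fields share one eightbyte; A classifies as INTEGERSI and B, at bit
   offset 32, as SSE, so Rule #4 merges them to X86_64_INTEGER_CLASS and
   the pair travels in a single general-purpose register.  */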
/* Classify the argument of type TYPE and mode MODE.
   CLASSES will be filled by the register class used to pass each word
   of the operand.  The number of words is returned.  In case the parameter
   should be passed in memory, 0 is returned.  As a special case for zero
   sized containers, classes[0] will be NO_CLASS and 1 is returned.

   BIT_OFFSET is used internally for handling records and specifies offset
   of the offset in bits modulo 256 to avoid overflow cases.

   See the x86-64 PS ABI for details.
*/

static int
classify_argument (enum machine_mode mode, const_tree type,
                   enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
{
  HOST_WIDE_INT bytes =
    (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
  int words
    = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
  int i;

  /* Variable sized entities are always passed/returned in memory.  */
  if (bytes < 0)
    return 0;

  if (mode != VOIDmode
      && targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  if (type && AGGREGATE_TYPE_P (type))
    {
      tree field;
      enum x86_64_reg_class subclasses[MAX_CLASSES];

      /* On x86-64 we pass structures larger than 32 bytes on the stack.  */
      if (bytes > 32)
        return 0;

      for (i = 0; i < words; i++)
        classes[i] = X86_64_NO_CLASS;

      /* Zero sized arrays or structures are NO_CLASS.  We return 0 to
         signalize memory class, so handle it as special case.  */
      if (!words)
        {
          classes[0] = X86_64_NO_CLASS;
          return 1;
        }

      /* Classify each field of record and merge classes.  */
      switch (TREE_CODE (type))
        {
        case RECORD_TYPE:
          /* And now merge the fields of structure.  */
          for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
            {
              if (TREE_CODE (field) == FIELD_DECL)
                {
                  int num;

                  if (TREE_TYPE (field) == error_mark_node)
                    continue;

                  /* Bitfields are always classified as integer.  Handle them
                     early, since later code would consider them to be
                     misaligned integers.  */
                  if (DECL_BIT_FIELD (field))
                    {
                      for (i = (int_bit_position (field)
                                + (bit_offset % 64)) / 8 / 8;
                           i < ((int_bit_position (field) + (bit_offset % 64))
                                + tree_low_cst (DECL_SIZE (field), 0)
                                + 63) / 8 / 8; i++)
                        classes[i] =
                          merge_classes (X86_64_INTEGER_CLASS,
                                         classes[i]);
                    }
                  else
                    {
                      int pos;

                      type = TREE_TYPE (field);

                      /* Flexible array member is ignored.  */
                      if (TYPE_MODE (type) == BLKmode
                          && TREE_CODE (type) == ARRAY_TYPE
                          && TYPE_SIZE (type) == NULL_TREE
                          && TYPE_DOMAIN (type) != NULL_TREE
                          && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
                              == NULL_TREE))
                        {
                          static bool warned;

                          if (!warned && warn_psabi)
                            {
                              warned = true;
                              inform (input_location,
                                      "the ABI of passing struct with"
                                      " a flexible array member has"
                                      " changed in GCC 4.4");
                            }
                          continue;
                        }
                      num = classify_argument (TYPE_MODE (type), type,
                                               subclasses,
                                               (int_bit_position (field)
                                                + bit_offset) % 256);
                      if (!num)
                        return 0;
                      pos = (int_bit_position (field)
                             + (bit_offset % 64)) / 8 / 8;
                      for (i = 0; i < num && (i + pos) < words; i++)
                        classes[i + pos] =
                          merge_classes (subclasses[i], classes[i + pos]);
                    }
                }
            }
          break;

        case ARRAY_TYPE:
          /* Arrays are handled as small records.  */
          {
            int num;
            num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
                                     TREE_TYPE (type), subclasses, bit_offset);
            if (!num)
              return 0;

            /* The partial classes are now full classes.  */
            if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
              subclasses[0] = X86_64_SSE_CLASS;
            if (subclasses[0] == X86_64_INTEGERSI_CLASS
                && !((bit_offset % 64) == 0 && bytes == 4))
              subclasses[0] = X86_64_INTEGER_CLASS;

            for (i = 0; i < words; i++)
              classes[i] = subclasses[i % num];

            break;
          }
        case UNION_TYPE:
        case QUAL_UNION_TYPE:
          /* Unions are similar to RECORD_TYPE but offset is always 0.
           */
          for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
            {
              if (TREE_CODE (field) == FIELD_DECL)
                {
                  int num;

                  if (TREE_TYPE (field) == error_mark_node)
                    continue;

                  num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
                                           TREE_TYPE (field), subclasses,
                                           bit_offset);
                  if (!num)
                    return 0;
                  for (i = 0; i < num; i++)
                    classes[i] = merge_classes (subclasses[i], classes[i]);
                }
            }
          break;

        default:
          gcc_unreachable ();
        }

      if (words > 2)
        {
          /* When size > 16 bytes, if the first one isn't
             X86_64_SSE_CLASS or any other ones aren't
             X86_64_SSEUP_CLASS, everything should be passed in
             memory.  */
          if (classes[0] != X86_64_SSE_CLASS)
            return 0;

          for (i = 1; i < words; i++)
            if (classes[i] != X86_64_SSEUP_CLASS)
              return 0;
        }

      /* Final merger cleanup.  */
      for (i = 0; i < words; i++)
        {
          /* If one class is MEMORY, everything should be passed in
             memory.  */
          if (classes[i] == X86_64_MEMORY_CLASS)
            return 0;

          /* The X86_64_SSEUP_CLASS should be always preceded by
             X86_64_SSE_CLASS or X86_64_SSEUP_CLASS.  */
          if (classes[i] == X86_64_SSEUP_CLASS
              && classes[i - 1] != X86_64_SSE_CLASS
              && classes[i - 1] != X86_64_SSEUP_CLASS)
            {
              /* The first one should never be X86_64_SSEUP_CLASS.  */
              gcc_assert (i != 0);
              classes[i] = X86_64_SSE_CLASS;
            }

          /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
             everything should be passed in memory.  */
          if (classes[i] == X86_64_X87UP_CLASS
              && (classes[i - 1] != X86_64_X87_CLASS))
            {
              static bool warned;

              /* The first one should never be X86_64_X87UP_CLASS.  */
              gcc_assert (i != 0);
              if (!warned && warn_psabi)
                {
                  warned = true;
                  inform (input_location,
                          "the ABI of passing union with long double"
                          " has changed in GCC 4.4");
                }
              return 0;
            }
        }
      return words;
    }
  /* Compute alignment needed.  We align all types to natural boundaries with
     exception of XFmode that is aligned to 64bits.  */
  if (mode != VOIDmode && mode != BLKmode)
    {
      int mode_alignment = GET_MODE_BITSIZE (mode);

      if (mode == XFmode)
        mode_alignment = 128;
      else if (mode == XCmode)
        mode_alignment = 256;
      if (COMPLEX_MODE_P (mode))
        mode_alignment /= 2;
      /* Misaligned fields are always returned in memory.  */
      if (bit_offset % mode_alignment)
        return 0;
    }

  /* for V1xx modes, just use the base mode */
  if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
      && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
    mode = GET_MODE_INNER (mode);

  /* Classification of atomic types.  */
  switch (mode)
    {
    case SDmode:
    case DDmode:
      classes[0] = X86_64_SSE_CLASS;
      return 1;
    case TDmode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case DImode:
    case SImode:
    case HImode:
    case QImode:
    case CSImode:
    case CHImode:
    case CQImode:
      {
        int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);

        if (size <= 32)
          {
            classes[0] = X86_64_INTEGERSI_CLASS;
            return 1;
          }
        else if (size <= 64)
          {
            classes[0] = X86_64_INTEGER_CLASS;
            return 1;
          }
        else if (size <= 64+32)
          {
            classes[0] = X86_64_INTEGER_CLASS;
            classes[1] = X86_64_INTEGERSI_CLASS;
            return 2;
          }
        else if (size <= 64+64)
          {
            classes[0] = classes[1] = X86_64_INTEGER_CLASS;
            return 2;
          }
        else
          gcc_unreachable ();
      }
    case CDImode:
    case TImode:
      classes[0] = classes[1] = X86_64_INTEGER_CLASS;
      return 2;
    case COImode:
    case OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();
    case CTImode:
      return 0;
    case SFmode:
      if (!(bit_offset % 64))
        classes[0] = X86_64_SSESF_CLASS;
      else
        classes[0] = X86_64_SSE_CLASS;
      return 1;
    case DFmode:
      classes[0] = X86_64_SSEDF_CLASS;
      return 1;
    case XFmode:
      classes[0] = X86_64_X87_CLASS;
      classes[1] = X86_64_X87UP_CLASS;
      return 2;
    case TFmode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case SCmode:
      classes[0] = X86_64_SSE_CLASS;
      if (!(bit_offset % 64))
        return 1;
      else
        {
          static bool warned;

          if (!warned && warn_psabi)
            {
              warned = true;
              inform (input_location,
                      "the ABI of passing structure with complex float"
                      " member has changed in GCC 4.4");
            }
          classes[1] = X86_64_SSESF_CLASS;
          return 2;
        }
    case DCmode:
      classes[0] = X86_64_SSEDF_CLASS;
      classes[1] = X86_64_SSEDF_CLASS;
      return 2;
    case XCmode:
      classes[0] = X86_64_COMPLEX_X87_CLASS;
      return 1;
    case TCmode:
      /* This mode is larger than 16 bytes.  */
      return 0;
    case V8SFmode:
    case V8SImode:
    case V32QImode:
    case V16HImode:
    case V4DFmode:
    case V4DImode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      classes[2] = X86_64_SSEUP_CLASS;
      classes[3] = X86_64_SSEUP_CLASS;
      return 4;
    case V4SFmode:
    case V4SImode:
    case V16QImode:
    case V8HImode:
    case V2DFmode:
    case V2DImode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case V1TImode:
    case V1DImode:
    case V2SFmode:
    case V2SImode:
    case V4HImode:
    case V8QImode:
      classes[0] = X86_64_SSE_CLASS;
      return 1;
    case BLKmode:
    case VOIDmode:
      return 0;

    default:
      gcc_assert (VECTOR_MODE_P (mode));

      if (bytes > 16)
        return 0;

      gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);

      if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
        classes[0] = X86_64_INTEGERSI_CLASS;
      else
        classes[0] = X86_64_INTEGER_CLASS;
      classes[1] = X86_64_INTEGER_CLASS;
      return 1 + (bytes > 8);
    }
}
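/* Worked example (a sketch): for "struct s { long a; double b; };" on
   x86-64, classify_argument returns 2 with classes[0] =
   X86_64_INTEGER_CLASS and classes[1] = X86_64_SSEDF_CLASS, so A is
   passed in a general-purpose register and B in an SSE register, as
   the psABI prescribes.  */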
/* Examine the argument and return set number of register required in each
   class.  Return 0 iff parameter should be passed in memory.  */

static int
examine_argument (enum machine_mode mode, const_tree type, int in_return,
                  int *int_nregs, int *sse_nregs)
{
  enum x86_64_reg_class regclass[MAX_CLASSES];
  int n = classify_argument (mode, type, regclass, 0);

  *int_nregs = 0;
  *sse_nregs = 0;
  if (!n)
    return 0;
  for (n--; n >= 0; n--)
    switch (regclass[n])
      {
      case X86_64_INTEGER_CLASS:
      case X86_64_INTEGERSI_CLASS:
        (*int_nregs)++;
        break;
      case X86_64_SSE_CLASS:
      case X86_64_SSESF_CLASS:
      case X86_64_SSEDF_CLASS:
        (*sse_nregs)++;
        break;
      case X86_64_NO_CLASS:
      case X86_64_SSEUP_CLASS:
        break;
      case X86_64_X87_CLASS:
      case X86_64_X87UP_CLASS:
        if (!in_return)
          return 0;
        break;
      case X86_64_COMPLEX_X87_CLASS:
        return in_return ? 2 : 0;
      case X86_64_MEMORY_CLASS:
        gcc_unreachable ();
      }
  return 1;
}
/* Construct container for the argument used by GCC interface.  See
   FUNCTION_ARG for the detailed description.  */

static rtx
construct_container (enum machine_mode mode, enum machine_mode orig_mode,
                     const_tree type, int in_return, int nintregs, int nsseregs,
                     const int *intreg, int sse_regno)
{
  /* The following variables hold the static issued_error state.  */
  static bool issued_sse_arg_error;
  static bool issued_sse_ret_error;
  static bool issued_x87_ret_error;

  enum machine_mode tmpmode;
  int bytes =
    (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
  enum x86_64_reg_class regclass[MAX_CLASSES];
  int n;
  int i;
  int nexps = 0;
  int needed_sseregs, needed_intregs;
  rtx exp[MAX_CLASSES];
  rtx ret;

  n = classify_argument (mode, type, regclass, 0);
  if (!n)
    return NULL;
  if (!examine_argument (mode, type, in_return, &needed_intregs,
                         &needed_sseregs))
    return NULL;
  if (needed_intregs > nintregs || needed_sseregs > nsseregs)
    return NULL;

  /* We allowed the user to turn off SSE for kernel mode.  Don't crash if
     some less clueful developer tries to use floating-point anyway.  */
  if (needed_sseregs && !TARGET_SSE)
    {
      if (in_return)
        {
          if (!issued_sse_ret_error)
            {
              error ("SSE register return with SSE disabled");
              issued_sse_ret_error = true;
            }
        }
      else if (!issued_sse_arg_error)
        {
          error ("SSE register argument with SSE disabled");
          issued_sse_arg_error = true;
        }
      return NULL;
    }

  /* Likewise, error if the ABI requires us to return values in the
     x87 registers and the user specified -mno-80387.  */
  if (!TARGET_80387 && in_return)
    for (i = 0; i < n; i++)
      if (regclass[i] == X86_64_X87_CLASS
          || regclass[i] == X86_64_X87UP_CLASS
          || regclass[i] == X86_64_COMPLEX_X87_CLASS)
        {
          if (!issued_x87_ret_error)
            {
              error ("x87 register return with x87 disabled");
              issued_x87_ret_error = true;
            }
          return NULL;
        }

  /* First construct simple cases.  Avoid SCmode, since we want to use
     single register to pass this type.  */
  if (n == 1 && mode != SCmode)
    switch (regclass[0])
      {
      case X86_64_INTEGER_CLASS:
      case X86_64_INTEGERSI_CLASS:
        return gen_rtx_REG (mode, intreg[0]);
      case X86_64_SSE_CLASS:
      case X86_64_SSESF_CLASS:
      case X86_64_SSEDF_CLASS:
        if (mode != BLKmode)
          return gen_reg_or_parallel (mode, orig_mode,
                                      SSE_REGNO (sse_regno));
        break;
      case X86_64_X87_CLASS:
      case X86_64_COMPLEX_X87_CLASS:
        return gen_rtx_REG (mode, FIRST_STACK_REG);
      case X86_64_NO_CLASS:
        /* Zero sized array, struct or class.  */
        return NULL;
      default:
        gcc_unreachable ();
      }
  if (n == 2
      && regclass[0] == X86_64_SSE_CLASS
      && regclass[1] == X86_64_SSEUP_CLASS
      && mode != BLKmode)
    return gen_reg_or_parallel (mode, orig_mode,
                                SSE_REGNO (sse_regno));
  if (n == 4
      && regclass[0] == X86_64_SSE_CLASS
      && regclass[1] == X86_64_SSEUP_CLASS
      && regclass[2] == X86_64_SSEUP_CLASS
      && regclass[3] == X86_64_SSEUP_CLASS
      && mode != BLKmode)
    return gen_reg_or_parallel (mode, orig_mode,
                                SSE_REGNO (sse_regno));
  if (n == 2
      && regclass[0] == X86_64_X87_CLASS
      && regclass[1] == X86_64_X87UP_CLASS)
    return gen_rtx_REG (XFmode, FIRST_STACK_REG);

  if (n == 2
      && regclass[0] == X86_64_INTEGER_CLASS
      && regclass[1] == X86_64_INTEGER_CLASS
      && (mode == CDImode || mode == TImode || mode == TFmode)
      && intreg[0] + 1 == intreg[1])
    return gen_rtx_REG (mode, intreg[0]);

  /* Otherwise figure out the entries of the PARALLEL.  */
  for (i = 0; i < n; i++)
    {
      int pos;

      switch (regclass[i])
        {
        case X86_64_NO_CLASS:
          break;
        case X86_64_INTEGER_CLASS:
        case X86_64_INTEGERSI_CLASS:
          /* Merge TImodes on aligned occasions here too.  */
          if (i * 8 + 8 > bytes)
            tmpmode
              = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
          else if (regclass[i] == X86_64_INTEGERSI_CLASS)
            tmpmode = SImode;
          else
            tmpmode = DImode;
          /* We've requested 24 bytes we
             don't have mode for.  Use DImode.  */
          if (tmpmode == BLKmode)
            tmpmode = DImode;
          exp [nexps++]
            = gen_rtx_EXPR_LIST (VOIDmode,
                                 gen_rtx_REG (tmpmode, *intreg),
                                 GEN_INT (i*8));
          intreg++;
          break;
        case X86_64_SSESF_CLASS:
          exp [nexps++]
            = gen_rtx_EXPR_LIST (VOIDmode,
                                 gen_rtx_REG (SFmode,
                                              SSE_REGNO (sse_regno)),
                                 GEN_INT (i*8));
          sse_regno++;
          break;
        case X86_64_SSEDF_CLASS:
          exp [nexps++]
            = gen_rtx_EXPR_LIST (VOIDmode,
                                 gen_rtx_REG (DFmode,
                                              SSE_REGNO (sse_regno)),
                                 GEN_INT (i*8));
          sse_regno++;
          break;
        case X86_64_SSE_CLASS:
          pos = i;
          switch (n)
            {
            case 1:
              tmpmode = DImode;
              break;
            case 2:
              if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
                {
                  tmpmode = TImode;
                  i++;
                }
              else
                tmpmode = DImode;
              break;
            case 4:
              gcc_assert (i == 0
                          && regclass[1] == X86_64_SSEUP_CLASS
                          && regclass[2] == X86_64_SSEUP_CLASS
                          && regclass[3] == X86_64_SSEUP_CLASS);
              tmpmode = OImode;
              i += 3;
              break;
            default:
              gcc_unreachable ();
            }
          exp [nexps++]
            = gen_rtx_EXPR_LIST (VOIDmode,
                                 gen_rtx_REG (tmpmode,
                                              SSE_REGNO (sse_regno)),
                                 GEN_INT (pos*8));
          sse_regno++;
          break;
        default:
          gcc_unreachable ();
        }
    }

  /* Empty aligned struct, union or class.  */
  if (nexps == 0)
    return NULL;

  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
  for (i = 0; i < nexps; i++)
    XVECEXP (ret, 0, i) = exp [i];
  return ret;
}
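/* Illustrative result (a sketch): for the struct { long a; double b; }
   example above, construct_container builds roughly
     (parallel [(expr_list (reg:DI di) (const_int 0))
                (expr_list (reg:DF xmm0) (const_int 8))])
   assuming %rdi is the next free integer register and %xmm0 the next
   free SSE register.  */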
/* Update the data in CUM to advance over an argument of mode MODE
   and data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)  */

static void
function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                         const_tree type, HOST_WIDE_INT bytes,
                         HOST_WIDE_INT words)
{
  switch (mode)
    {
    default:
      break;

    case BLKmode:
      if (bytes < 0)
        break;
      /* FALLTHRU */

    case DImode:
    case SImode:
    case HImode:
    case QImode:
      cum->words += words;
      cum->nregs -= words;
      cum->regno += words;

      if (cum->nregs <= 0)
        {
          cum->nregs = 0;
          cum->regno = 0;
        }
      break;

    case OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();

    case DFmode:
      if (cum->float_in_sse < 2)
        break;
    case SFmode:
      if (cum->float_in_sse < 1)
        break;
      /* FALLTHRU */

    case V8SFmode:
    case V8SImode:
    case V32QImode:
    case V16HImode:
    case V4DFmode:
    case V4DImode:
    case TImode:
    case V16QImode:
    case V8HImode:
    case V4SImode:
    case V2DImode:
    case V4SFmode:
    case V2DFmode:
      if (!type || !AGGREGATE_TYPE_P (type))
        {
          cum->sse_words += words;
          cum->sse_nregs -= 1;
          cum->sse_regno += 1;
          if (cum->sse_nregs <= 0)
            {
              cum->sse_nregs = 0;
              cum->sse_regno = 0;
            }
        }
      break;

    case V8QImode:
    case V4HImode:
    case V2SImode:
    case V2SFmode:
    case V1TImode:
    case V1DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
        {
          cum->mmx_words += words;
          cum->mmx_nregs -= 1;
          cum->mmx_regno += 1;
          if (cum->mmx_nregs <= 0)
            {
              cum->mmx_nregs = 0;
              cum->mmx_regno = 0;
            }
        }
      break;
    }
}
static void
function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                         const_tree type, HOST_WIDE_INT words, bool named)
{
  int int_nregs, sse_nregs;

  /* Unnamed 256bit vector mode parameters are passed on stack.  */
  if (!named && VALID_AVX256_REG_MODE (mode))
    return;

  if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
      && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
    {
      cum->nregs -= int_nregs;
      cum->sse_nregs -= sse_nregs;
      cum->regno += int_nregs;
      cum->sse_regno += sse_nregs;
    }
  else
    {
      int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
      cum->words = (cum->words + align - 1) & ~(align - 1);
      cum->words += words;
    }
}

static void
function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
                            HOST_WIDE_INT words)
{
  /* Otherwise, this should be passed indirect.  */
  gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);

  cum->words += words;
  if (cum->nregs > 0)
    {
      cum->nregs -= 1;
      cum->regno += 1;
    }
}
/* Update the data in CUM to advance over an argument of mode MODE and
   data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)  */

static void
ix86_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                           const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  HOST_WIDE_INT bytes, words;

  if (mode == BLKmode)
    bytes = int_size_in_bytes (type);
  else
    bytes = GET_MODE_SIZE (mode);
  words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (type)
    mode = type_natural_mode (type, NULL);

  if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
    function_arg_advance_ms_64 (cum, bytes, words);
  else if (TARGET_64BIT)
    function_arg_advance_64 (cum, mode, type, words, named);
  else
    function_arg_advance_32 (cum, mode, type, bytes, words);
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
   (otherwise it is an extra parameter matching an ellipsis).  */

static rtx
function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                 enum machine_mode orig_mode, const_tree type,
                 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
{
  static bool warnedsse, warnedmmx;

  /* Avoid the AL settings for the Unix64 ABI.  */
  if (mode == VOIDmode)
    return constm1_rtx;

  switch (mode)
    {
    default:
      break;

    case BLKmode:
      if (bytes < 0)
        break;
      /* FALLTHRU */
    case DImode:
    case SImode:
    case HImode:
    case QImode:
      if (words <= cum->nregs)
        {
          int regno = cum->regno;

          /* Fastcall allocates the first two DWORD (SImode) or
             smaller arguments to ECX and EDX if it isn't an
             aggregate type.  */
          if (cum->fastcall)
            {
              if (mode == BLKmode
                  || mode == DImode
                  || (type && AGGREGATE_TYPE_P (type)))
                break;

              /* ECX not EAX is the first allocated register.  */
              if (regno == AX_REG)
                regno = CX_REG;
            }
          return gen_rtx_REG (mode, regno);
        }
      break;

    case DFmode:
      if (cum->float_in_sse < 2)
        break;
    case SFmode:
      if (cum->float_in_sse < 1)
        break;
      /* FALLTHRU */
    case TImode:
      /* In 32bit, we pass TImode in xmm registers.  */
    case V16QImode:
    case V8HImode:
    case V4SImode:
    case V2DImode:
    case V4SFmode:
    case V2DFmode:
      if (!type || !AGGREGATE_TYPE_P (type))
        {
          if (!TARGET_SSE && !warnedsse && cum->warn_sse)
            {
              warnedsse = true;
              warning (0, "SSE vector argument without SSE enabled "
                       "changes the ABI");
            }
          if (cum->sse_nregs)
            return gen_reg_or_parallel (mode, orig_mode,
                                        cum->sse_regno + FIRST_SSE_REG);
        }
      break;

    case OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();

    case V8SFmode:
    case V8SImode:
    case V32QImode:
    case V16HImode:
    case V4DFmode:
    case V4DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
        {
          if (cum->sse_nregs)
            return gen_reg_or_parallel (mode, orig_mode,
                                        cum->sse_regno + FIRST_SSE_REG);
        }
      break;

    case V8QImode:
    case V4HImode:
    case V2SImode:
    case V2SFmode:
    case V1TImode:
    case V1DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
        {
          if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
            {
              warnedmmx = true;
              warning (0, "MMX vector argument without MMX enabled "
                       "changes the ABI");
            }
          if (cum->mmx_nregs)
            return gen_reg_or_parallel (mode, orig_mode,
                                        cum->mmx_regno + FIRST_MMX_REG);
        }
      break;
    }

  return NULL_RTX;
}
static rtx
function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                 enum machine_mode orig_mode, const_tree type, bool named)
{
  /* Handle a hidden AL argument containing number of registers
     for varargs x86-64 functions.  */
  if (mode == VOIDmode)
    return GEN_INT (cum->maybe_vaarg
                    ? (cum->sse_nregs < 0
                       ? X86_64_SSE_REGPARM_MAX
                       : cum->sse_regno)
                    : -1);

  switch (mode)
    {
    default:
      break;

    case V8SFmode:
    case V8SImode:
    case V32QImode:
    case V16HImode:
    case V4DFmode:
    case V4DImode:
      /* Unnamed 256bit vector mode parameters are passed on stack.  */
      if (!named)
        return NULL;
      break;
    }

  return construct_container (mode, orig_mode, type, 0, cum->nregs,
                              cum->sse_nregs,
                              &x86_64_int_parameter_registers [cum->regno],
                              cum->sse_regno);
}
static rtx
function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                    enum machine_mode orig_mode, bool named,
                    HOST_WIDE_INT bytes)
{
  unsigned int regno;

  /* We need to add clobber for MS_ABI->SYSV ABI calls in expand_call.
     We use value of -2 to specify that current function call is MSABI.  */
  if (mode == VOIDmode)
    return GEN_INT (-2);

  /* If we've run out of registers, it goes on the stack.  */
  if (cum->nregs == 0)
    return NULL_RTX;

  regno = x86_64_ms_abi_int_parameter_registers[cum->regno];

  /* Only floating point modes are passed in anything but integer regs.  */
  if (TARGET_SSE && (mode == SFmode || mode == DFmode))
    {
      if (named)
        regno = cum->regno + FIRST_SSE_REG;
      else
        {
          rtx t1, t2;

          /* Unnamed floating parameters are passed in both the
             SSE and integer registers.  */
          t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
          t2 = gen_rtx_REG (mode, regno);
          t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
          t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
          return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
        }
    }
  /* Handle aggregated types passed in register.  */
  if (orig_mode == BLKmode)
    {
      if (bytes > 0 && bytes <= 8)
        mode = (bytes > 4 ? DImode : SImode);
      if (mode == BLKmode)
        mode = DImode;
    }

  return gen_reg_or_parallel (mode, orig_mode, regno);
}
/* Return where to put the arguments to a function.
   Return zero to push the argument on the stack, or a hard register in
   which to store the argument.

   MODE is the argument's machine mode.  TYPE is the data type of the
   argument.  It is null for libcalls where that information may not be
   available.  CUM gives information about the preceding args and about
   the function being called.  NAMED is nonzero if this argument is a
   named parameter (otherwise it is an extra parameter matching an
   ellipsis).  */

static rtx
ix86_function_arg (cumulative_args_t cum_v, enum machine_mode omode,
                   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum machine_mode mode = omode;
  HOST_WIDE_INT bytes, words;
  rtx arg;

  if (mode == BLKmode)
    bytes = int_size_in_bytes (type);
  else
    bytes = GET_MODE_SIZE (mode);
  words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* To simplify the code below, represent vector types with a vector mode
     even if MMX/SSE are not active.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    mode = type_natural_mode (type, cum);

  if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
    arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
  else if (TARGET_64BIT)
    arg = function_arg_64 (cum, mode, omode, type, named);
  else
    arg = function_arg_32 (cum, mode, omode, type, bytes, words);

  return arg;
}
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.  */

static bool
ix86_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
                        enum machine_mode mode ATTRIBUTE_UNUSED,
                        const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  /* See Windows x64 Software Convention.  */
  if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
    {
      int msize = (int) GET_MODE_SIZE (mode);
      if (type)
        {
          /* Arrays are passed by reference.  */
          if (TREE_CODE (type) == ARRAY_TYPE)
            return true;

          if (AGGREGATE_TYPE_P (type))
            {
              /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
                 are passed by reference.  */
              msize = int_size_in_bytes (type);
            }
        }

      /* __m128 is passed by reference.  */
      switch (msize)
        {
        case 1: case 2: case 4: case 8:
          break;
        default:
          return true;
        }
    }
  else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
    return true;

  return false;
}
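/* Summary example (a sketch of the Windows x64 rule encoded above): an
   aggregate of size 1, 2, 4 or 8 bytes travels in a register; anything
   else, e.g. "struct s { char c[3]; };", is copied to memory and a
   pointer to the copy is passed instead.  */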
/* Return true when TYPE should be 128bit aligned for 32bit argument
   passing ABI.  XXX: This function is obsolete and is only used for
   checking psABI compatibility with previous versions of GCC.  */

static bool
ix86_compat_aligned_value_p (const_tree type)
{
  enum machine_mode mode = TYPE_MODE (type);
  if (((TARGET_SSE && SSE_REG_MODE_P (mode))
       || mode == TDmode
       || mode == TFmode
       || mode == TCmode)
      && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
    return true;
  if (TYPE_ALIGN (type) < 128)
    return false;

  if (AGGREGATE_TYPE_P (type))
    {
      /* Walk the aggregates recursively.  */
      switch (TREE_CODE (type))
        {
        case RECORD_TYPE:
        case UNION_TYPE:
        case QUAL_UNION_TYPE:
          {
            tree field;

            /* Walk all the structure fields.  */
            for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
              {
                if (TREE_CODE (field) == FIELD_DECL
                    && ix86_compat_aligned_value_p (TREE_TYPE (field)))
                  return true;
              }
            break;
          }

        case ARRAY_TYPE:
          /* Just for use if some languages pass arrays by value.  */
          if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
            return true;
          break;

        default:
          gcc_unreachable ();
        }
    }
  return false;
}
/* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
   XXX: This function is obsolete and is only used for checking psABI
   compatibility with previous versions of GCC.  */

static unsigned int
ix86_compat_function_arg_boundary (enum machine_mode mode,
                                   const_tree type, unsigned int align)
{
  /* In 32bit, only _Decimal128 and __float128 are aligned to their
     natural boundaries.  */
  if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
    {
      /* i386 ABI defines all arguments to be 4 byte aligned.  We have to
         make an exception for SSE modes since these require 128bit
         alignment.

         The handling here differs from field_alignment.  ICC aligns MMX
         arguments to 4 byte boundaries, while structure fields are aligned
         to 8 byte boundaries.  */
      if (!type)
        {
          if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
            align = PARM_BOUNDARY;
        }
      else
        {
          if (!ix86_compat_aligned_value_p (type))
            align = PARM_BOUNDARY;
        }
    }
  if (align > BIGGEST_ALIGNMENT)
    align = BIGGEST_ALIGNMENT;
  return align;
}
/* Return true when TYPE should be 128bit aligned for 32bit argument
   passing ABI.  */

static bool
ix86_contains_aligned_value_p (const_tree type)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (mode == XFmode || mode == XCmode)
    return false;

  if (TYPE_ALIGN (type) < 128)
    return false;

  if (AGGREGATE_TYPE_P (type))
    {
      /* Walk the aggregates recursively.  */
      switch (TREE_CODE (type))
        {
        case RECORD_TYPE:
        case UNION_TYPE:
        case QUAL_UNION_TYPE:
          {
            tree field;

            /* Walk all the structure fields.  */
            for (field = TYPE_FIELDS (type);
                 field;
                 field = DECL_CHAIN (field))
              {
                if (TREE_CODE (field) == FIELD_DECL
                    && ix86_contains_aligned_value_p (TREE_TYPE (field)))
                  return true;
              }
            break;
          }

        case ARRAY_TYPE:
          /* Just for use if some languages pass arrays by value.  */
          if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
            return true;
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    return TYPE_ALIGN (type) >= 128;

  return false;
}
/* Gives the alignment boundary, in bits, of an argument with the
   specified mode and type.  */

static unsigned int
ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  unsigned int align;
  if (type)
    {
      /* Since the main variant type is used for call, we convert it to
         the main variant type.  */
      type = TYPE_MAIN_VARIANT (type);
      align = TYPE_ALIGN (type);
    }
  else
    align = GET_MODE_ALIGNMENT (mode);
  if (align < PARM_BOUNDARY)
    align = PARM_BOUNDARY;
  else
    {
      static bool warned;
      unsigned int saved_align = align;

      if (!TARGET_64BIT)
        {
          /* i386 ABI defines XFmode arguments to be 4 byte aligned.  */
          if (!type)
            {
              if (mode == XFmode || mode == XCmode)
                align = PARM_BOUNDARY;
            }
          else if (!ix86_contains_aligned_value_p (type))
            align = PARM_BOUNDARY;

          if (align < 128)
            align = PARM_BOUNDARY;
        }

      if (warn_psabi
          && !warned
          && align != ix86_compat_function_arg_boundary (mode, type,
                                                         saved_align))
        {
          warned = true;
          inform (input_location,
                  "The ABI for passing parameters with %d-byte"
                  " alignment has changed in GCC 4.6",
                  align / BITS_PER_UNIT);
        }
    }

  return align;
}
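/* Example (a sketch): on 32-bit, a "double" argument gets PARM_BOUNDARY
   (32-bit) alignment, while a 16-byte-aligned vector type such as __m128
   keeps its 128-bit boundary; the warn_psabi inform above fires when
   this result differs from what GCC 4.5 and earlier computed.  */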
/* Return true if N is a possible register number of function value.  */

static bool
ix86_function_value_regno_p (const unsigned int regno)
{
  switch (regno)
    {
    case AX_REG:
      return true;

    case FIRST_FLOAT_REG:
      /* TODO: The function should depend on current function ABI but
         builtins.c would need updating then.  Therefore we use the
         default ABI.  */
      if (TARGET_64BIT && ix86_abi == MS_ABI)
        return false;
      return TARGET_FLOAT_RETURNS_IN_80387;

    case FIRST_SSE_REG:
      return TARGET_SSE;

    case FIRST_MMX_REG:
      if (TARGET_MACHO || TARGET_64BIT)
        return false;
      return TARGET_MMX;
    }

  return false;
}
/* Define how to find the value returned by a function.
   VALTYPE is the data type of the value (as a tree).
   If the precise function being called is known, FUNC is its FUNCTION_DECL;
   otherwise, FUNC is 0.  */

static rtx
function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
                   const_tree fntype, const_tree fn)
{
  unsigned int regno;

  /* 8-byte vector modes in %mm0.  See ix86_return_in_memory for where
     we normally prevent this case when mmx is not available.  However
     some ABIs may require the result to be returned like DImode.  */
  if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
    regno = FIRST_MMX_REG;

  /* 16-byte vector modes in %xmm0.  See ix86_return_in_memory for where
     we prevent this case when sse is not available.  However some ABIs
     may require the result to be returned like integer TImode.  */
  else if (mode == TImode
           || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
    regno = FIRST_SSE_REG;

  /* 32-byte vector modes in %ymm0.  */
  else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
    regno = FIRST_SSE_REG;

  /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387).  */
  else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
    regno = FIRST_FLOAT_REG;
  else
    /* Most things go in %eax.  */
    regno = AX_REG;

  /* Override FP return register with %xmm0 for local functions when
     SSE math is enabled or for functions with sseregparm attribute.  */
  if ((fn || fntype) && (mode == SFmode || mode == DFmode))
    {
      int sse_level = ix86_function_sseregparm (fntype, fn, false);
      if ((sse_level >= 1 && mode == SFmode)
          || (sse_level == 2 && mode == DFmode))
        regno = FIRST_SSE_REG;
    }

  /* OImode shouldn't be used directly.  */
  gcc_assert (mode != OImode);

  return gen_rtx_REG (orig_mode, regno);
}
static rtx
function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
                   const_tree valtype)
{
  rtx ret;

  /* Handle libcalls, which don't provide a type node.  */
  if (valtype == NULL)
    {
      unsigned int regno;

      switch (mode)
        {
        case SFmode:
        case SCmode:
        case DFmode:
        case DCmode:
        case TFmode:
        case SDmode:
        case DDmode:
        case TDmode:
          regno = FIRST_SSE_REG;
          break;
        case XFmode:
        case XCmode:
          regno = FIRST_FLOAT_REG;
          break;
        case TCmode:
          return NULL;
        default:
          regno = AX_REG;
        }

      return gen_rtx_REG (mode, regno);
    }
  else if (POINTER_TYPE_P (valtype))
    {
      /* Pointers are always returned in word_mode.  */
      mode = word_mode;
    }

  ret = construct_container (mode, orig_mode, valtype, 1,
                             X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
                             x86_64_int_return_registers, 0);

  /* For zero sized structures, construct_container returns NULL, but we
     need to keep rest of compiler happy by returning meaningful value.  */
  if (!ret)
    ret = gen_rtx_REG (orig_mode, AX_REG);

  return ret;
}
static rtx
function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode,
                      const_tree valtype)
{
  unsigned int regno = AX_REG;

  if (TARGET_SSE)
    {
      switch (GET_MODE_SIZE (mode))
        {
        case 16:
          if (valtype != NULL_TREE
              && !VECTOR_INTEGER_TYPE_P (valtype)
              && !INTEGRAL_TYPE_P (valtype)
              && !VECTOR_FLOAT_TYPE_P (valtype))
            break;
          if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
              && !COMPLEX_MODE_P (mode))
            regno = FIRST_SSE_REG;
          break;
        case 8:
        case 4:
          if (mode == SFmode || mode == DFmode)
            regno = FIRST_SSE_REG;
          break;
        default:
          break;
        }
    }
  return gen_rtx_REG (orig_mode, regno);
}
static rtx
ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
                       enum machine_mode orig_mode, enum machine_mode mode)
{
  const_tree fn, fntype;

  fn = NULL_TREE;
  if (fntype_or_decl && DECL_P (fntype_or_decl))
    fn = fntype_or_decl;
  fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;

  if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
    return function_value_ms_64 (orig_mode, mode, valtype);
  else if (TARGET_64BIT)
    return function_value_64 (orig_mode, mode, valtype);
  else
    return function_value_32 (orig_mode, mode, fntype, fn);
}

static rtx
ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
                     bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode, orig_mode;

  orig_mode = TYPE_MODE (valtype);
  mode = type_natural_mode (valtype, NULL);
  return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
}
/* Pointer function arguments and return values are promoted to
   word_mode.  */

static enum machine_mode
ix86_promote_function_mode (const_tree type, enum machine_mode mode,
                            int *punsignedp, const_tree fntype,
                            int for_return)
{
  if (type != NULL_TREE && POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return word_mode;
    }
  return default_promote_function_mode (type, mode, punsignedp, fntype,
                                        for_return);
}

/* Return true if a structure, union or array with MODE containing FIELD
   should be accessed using BLKmode.  */

static bool
ix86_member_type_forces_blk (const_tree field, enum machine_mode mode)
{
  /* Union with XFmode must be in BLKmode.  */
  return (mode == XFmode
          && (TREE_CODE (DECL_FIELD_CONTEXT (field)) == UNION_TYPE
              || TREE_CODE (DECL_FIELD_CONTEXT (field)) == QUAL_UNION_TYPE));
}

rtx
ix86_libcall_value (enum machine_mode mode)
{
  return ix86_function_value_1 (NULL, NULL, mode, mode);
}
/* Return true iff type is returned in memory.  */

static bool ATTRIBUTE_UNUSED
return_in_memory_32 (const_tree type, enum machine_mode mode)
{
  HOST_WIDE_INT size;

  if (mode == BLKmode)
    return true;

  size = int_size_in_bytes (type);

  if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
    return false;

  if (VECTOR_MODE_P (mode) || mode == TImode)
    {
      /* User-created vectors small enough to fit in EAX.  */
      if (size < 8)
        return false;

      /* MMX/3dNow values are returned in MM0,
         except when it doesn't exist or the ABI prescribes otherwise.  */
      if (size == 8)
        return !TARGET_MMX || TARGET_VECT8_RETURNS;

      /* SSE values are returned in XMM0, except when it doesn't exist.  */
      if (size == 16)
        return !TARGET_SSE;

      /* AVX values are returned in YMM0, except when it doesn't exist.  */
      if (size == 32)
        return !TARGET_AVX;
    }

  if (mode == XFmode)
    return false;

  if (size > 12)
    return true;

  /* OImode shouldn't be used directly.  */
  gcc_assert (mode != OImode);

  return false;
}

static bool ATTRIBUTE_UNUSED
return_in_memory_64 (const_tree type, enum machine_mode mode)
{
  int needed_intregs, needed_sseregs;
  return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
}

static bool ATTRIBUTE_UNUSED
return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
{
  HOST_WIDE_INT size = int_size_in_bytes (type);

  /* __m128 is returned in xmm0.  */
  if ((!type || VECTOR_INTEGER_TYPE_P (type) || INTEGRAL_TYPE_P (type)
       || VECTOR_FLOAT_TYPE_P (type))
      && (SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
      && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
    return false;

  /* Otherwise, the size must be exactly in [1248].  */
  return size != 1 && size != 2 && size != 4 && size != 8;
}
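/* Illustrative consequences (a sketch) under the MS x64 return rule:
     struct { int a; int b; }   -> RAX    (size 8)
     struct { char c[3]; }      -> memory (size 3 is not 1, 2, 4 or 8)
     __m128 / 16-byte vectors   -> XMM0.  */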
static bool
ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
#ifdef SUBTARGET_RETURN_IN_MEMORY
  return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
#else
  const enum machine_mode mode = type_natural_mode (type, NULL);

  if (TARGET_64BIT)
    {
      if (ix86_function_type_abi (fntype) == MS_ABI)
        return return_in_memory_ms_64 (type, mode);
      else
        return return_in_memory_64 (type, mode);
    }
  else
    return return_in_memory_32 (type, mode);
#endif
}
/* When returning SSE vector types, we have a choice of either
     (1) being abi incompatible with a -march switch, or
     (2) generating an error.
   Given no good solution, I think the safest thing is one warning.
   The user won't be able to use -Werror, but....

   Choose the STRUCT_VALUE_RTX hook because that's (at present) only
   called in response to actually generating a caller or callee that
   uses such a type.  As opposed to TARGET_RETURN_IN_MEMORY, which is called
   via aggregate_value_p for general type probing from tree-ssa.  */

static rtx
ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
{
  static bool warnedsse, warnedmmx;

  if (!TARGET_64BIT && type)
    {
      /* Look at the return type of the function, not the function type.  */
      enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));

      if (!TARGET_SSE && !warnedsse)
        {
          if (mode == TImode
              || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
            {
              warnedsse = true;
              warning (0, "SSE vector return without SSE enabled "
                       "changes the ABI");
            }
        }

      if (!TARGET_MMX && !warnedmmx)
        {
          if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
            {
              warnedmmx = true;
              warning (0, "MMX vector return without MMX enabled "
                       "changes the ABI");
            }
        }
    }

  return NULL;
}
/* Create the va_list data type.  */

/* Returns the calling convention specific va_list data type.
   The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI.  */

static tree
ix86_build_builtin_va_list_abi (enum calling_abi abi)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;

  /* For i386 we use plain pointer to argument area.  */
  if (!TARGET_64BIT || abi == MS_ABI)
    return build_pointer_type (char_type_node);

  record = lang_hooks.types.make_type (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
                          TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("gp_offset"),
                      unsigned_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("fp_offset"),
                      unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("overflow_arg_area"),
                      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, get_identifier ("reg_save_area"),
                      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
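/* For reference, the record built above corresponds to the well-known
   SysV x86-64 va_list layout (a sketch in user-level C):

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } va_list[1];  */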
/* Setup the builtin va_list data type and for 64-bit the additional
   calling convention specific va_list data types.  */

static tree
ix86_build_builtin_va_list (void)
{
  tree ret = ix86_build_builtin_va_list_abi (ix86_abi);

  /* Initialize abi specific va_list builtin types.  */
  if (TARGET_64BIT)
    {
      tree t;
      if (ix86_abi == MS_ABI)
        {
          t = ix86_build_builtin_va_list_abi (SYSV_ABI);
          if (TREE_CODE (t) != RECORD_TYPE)
            t = build_variant_type_copy (t);
          sysv_va_list_type_node = t;
        }
      else
        {
          t = ret;
          if (TREE_CODE (t) != RECORD_TYPE)
            t = build_variant_type_copy (t);
          sysv_va_list_type_node = t;
        }
      if (ix86_abi != MS_ABI)
        {
          t = ix86_build_builtin_va_list_abi (MS_ABI);
          if (TREE_CODE (t) != RECORD_TYPE)
            t = build_variant_type_copy (t);
          ms_va_list_type_node = t;
        }
      else
        {
          t = ret;
          if (TREE_CODE (t) != RECORD_TYPE)
            t = build_variant_type_copy (t);
          ms_va_list_type_node = t;
        }
    }

  return ret;
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.  */

static void
setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
{
  rtx save_area, mem;
  alias_set_type set;
  int i, max;

  /* GPR size of varargs save area.  */
  if (cfun->va_list_gpr_size)
    ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
  else
    ix86_varargs_gpr_size = 0;

  /* FPR size of varargs save area.  We don't need it if we don't pass
     anything in SSE registers.  */
  if (TARGET_SSE && cfun->va_list_fpr_size)
    ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
  else
    ix86_varargs_fpr_size = 0;

  if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
    return;

  save_area = frame_pointer_rtx;
  set = get_varargs_alias_set ();

  max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
  if (max > X86_64_REGPARM_MAX)
    max = X86_64_REGPARM_MAX;

  for (i = cum->regno; i < max; i++)
    {
      mem = gen_rtx_MEM (word_mode,
			 plus_constant (Pmode, save_area, i * UNITS_PER_WORD));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      emit_move_insn (mem,
		      gen_rtx_REG (word_mode,
				   x86_64_int_parameter_registers[i]));
    }

  if (ix86_varargs_fpr_size)
    {
      enum machine_mode smode;
      rtx label, test;

      /* Now emit code to save SSE registers.  The AX parameter contains number
	 of SSE parameter registers used to call this function, though all we
	 actually check here is the zero/non-zero status.  */

      label = gen_label_rtx ();
      test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
      emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
				      label));

      /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
	 we used movdqa (i.e. TImode) instead?  Perhaps even better would
	 be if we could determine the real mode of the data, via a hook
	 into pass_stdarg.  Ignore all that for now.  */
      smode = V4SFmode;
      if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
	crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);

      max = cum->sse_regno + cfun->va_list_fpr_size / 16;
      if (max > X86_64_SSE_REGPARM_MAX)
	max = X86_64_SSE_REGPARM_MAX;

      for (i = cum->sse_regno; i < max; ++i)
	{
	  mem = plus_constant (Pmode, save_area,
			       i * 16 + ix86_varargs_gpr_size);
	  mem = gen_rtx_MEM (smode, mem);
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (smode));

	  emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
	}

      emit_label (label);
    }
}
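/* Illustrative note (our annotation, derived from the loops above and
   assuming X86_64_REGPARM_MAX == 6 and X86_64_SSE_REGPARM_MAX == 8): the
   varargs save area built here, relative to the frame pointer, is

	[fp + 0]  .. [fp + 47]	rdi rsi rdx rcx r8 r9	(8 bytes each)
	[fp + 48] .. [fp + 175]	xmm0 .. xmm7		(16 bytes each)

   with the SSE block starting at ix86_varargs_gpr_size, matching the
   gp_offset/fp_offset values stored by ix86_va_start below.  */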
static void
setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
{
  alias_set_type set = get_varargs_alias_set ();
  int i;

  /* Reset to zero, as there might be a sysv va_arg used
     before.  */
  ix86_varargs_gpr_size = 0;
  ix86_varargs_fpr_size = 0;

  for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
    {
      rtx reg, mem;

      mem = gen_rtx_MEM (Pmode,
			 plus_constant (Pmode, virtual_incoming_args_rtx,
					i * UNITS_PER_WORD));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);

      reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
      emit_move_insn (mem, reg);
    }
}
static void
ix86_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
			     tree type, int *pretend_size ATTRIBUTE_UNUSED,
			     int no_rtl)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  CUMULATIVE_ARGS next_cum;
  tree fntype;

  /* This argument doesn't appear to be used anymore.  Which is good,
     because the old code here didn't suppress rtl generation.  */
  gcc_assert (!no_rtl);

  if (!TARGET_64BIT)
    return;

  fntype = TREE_TYPE (current_function_decl);

  /* For varargs, we do not want to skip the dummy va_dcl argument.
     For stdargs, we do want to skip the last named argument.  */
  next_cum = *cum;
  if (stdarg_p (fntype))
    ix86_function_arg_advance (pack_cumulative_args (&next_cum), mode, type,
			       true);

  if (cum->call_abi == MS_ABI)
    setup_incoming_varargs_ms_64 (&next_cum);
  else
    setup_incoming_varargs_64 (&next_cum);
}
/* Checks if TYPE is of kind va_list char *.  */

static bool
is_va_list_char_pointer (tree type)
{
  tree canonic;

  /* For 32-bit it is always true.  */
  if (!TARGET_64BIT)
    return true;
  canonic = ix86_canonical_va_list_type (type);
  return (canonic == ms_va_list_type_node
	  || (ix86_abi == MS_ABI && canonic == va_list_type_node));
}
/* Implement va_start.  */

static void
ix86_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;
  tree type;
  rtx ovf_rtx;

  if (flag_split_stack
      && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    {
      unsigned int scratch_regno;

      /* When we are splitting the stack, we can't refer to the stack
	 arguments using internal_arg_pointer, because they may be on
	 the old stack.  The split stack prologue will arrange to
	 leave a pointer to the old stack arguments in a scratch
	 register, which we here copy to a pseudo-register.  The split
	 stack prologue can't set the pseudo-register directly because
	 it (the prologue) runs before any registers have been saved.  */

      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno != INVALID_REGNUM)
	{
	  rtx reg, seq;

	  reg = gen_reg_rtx (Pmode);
	  cfun->machine->split_stack_varargs_pointer = reg;

	  start_sequence ();
	  emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
	  seq = get_insns ();
	  end_sequence ();

	  push_topmost_sequence ();
	  emit_insn_after (seq, entry_of_function ());
	  pop_topmost_sequence ();
	}
    }

  /* Only 64bit target needs something special.  */
  if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
    {
      if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
	std_expand_builtin_va_start (valist, nextarg);
      else
	{
	  rtx va_r, next;

	  va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  next = expand_binop (ptr_mode, add_optab,
			       cfun->machine->split_stack_varargs_pointer,
			       crtl->args.arg_offset_rtx,
			       NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  convert_move (va_r, next, 0);
	}
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
  /* The following should be folded into the MEM_REF offset.  */
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
		f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = crtl->args.info.regno;
  n_fpr = crtl->args.info.sse_regno;

  if (cfun->va_list_gpr_size)
    {
      type = TREE_TYPE (gpr);
      t = build2 (MODIFY_EXPR, type,
		  gpr, build_int_cst (type, n_gpr * 8));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (TARGET_SSE && cfun->va_list_fpr_size)
    {
      type = TREE_TYPE (fpr);
      t = build2 (MODIFY_EXPR, type, fpr,
		  build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  type = TREE_TYPE (ovf);
  if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    ovf_rtx = crtl->args.internal_arg_pointer;
  else
    ovf_rtx = cfun->machine->split_stack_varargs_pointer;
  t = make_tree (type, ovf_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, type, ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
    {
      /* Find the register save area.
	 The function prologue saves it right above the stack frame.  */
      type = TREE_TYPE (sav);
      t = make_tree (type, frame_pointer_rtx);
      if (!ix86_varargs_gpr_size)
	t = fold_build_pointer_plus_hwi (t, -8 * X86_64_REGPARM_MAX);
      t = build2 (MODIFY_EXPR, type, sav, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
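/* Illustrative summary (our annotation, hedged): for a SysV x86-64
   function whose named arguments consumed N_GPR integer registers,
   N_FPR SSE registers and WORDS stack words, the expansion above
   behaves roughly like

	ap->gp_offset = N_GPR * 8;		// 0 .. 48
	ap->fp_offset = 48 + N_FPR * 16;	// 48 .. 176
	ap->overflow_arg_area = arg_pointer + WORDS * 8;
	ap->reg_save_area = frame_pointer;	// see setup_incoming_varargs_64

   assuming X86_64_REGPARM_MAX == 6 and X86_64_SSE_REGPARM_MAX == 8.  */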
/* Implement va_arg.  */

static tree
ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
		      gimple_seq *post_p)
{
  static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;
  int size, rsize;
  tree lab_false, lab_over = NULL_TREE;
  tree addr, t2;
  rtx container;
  int indirect_p = 0;
  tree ptrtype;
  enum machine_mode nat_mode;
  unsigned int arg_boundary;

  /* Only 64bit target needs something special.  */
  if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
		build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
  valist = build_va_arg_indirect_ref (valist);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect_p)
    type = build_pointer_type (type);
  size = int_size_in_bytes (type);
  rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  nat_mode = type_natural_mode (type, NULL);
  switch (nat_mode)
    {
    case V8SFmode:
    case V8SImode:
    case V32QImode:
    case V16HImode:
    case V4DFmode:
    case V4DImode:
      /* Unnamed 256bit vector mode parameters are passed on stack.  */
      if (!TARGET_64BIT_MS_ABI)
	{
	  container = NULL;
	  break;
	}

    default:
      container = construct_container (nat_mode, TYPE_MODE (type),
				       type, 0, X86_64_REGPARM_MAX,
				       X86_64_SSE_REGPARM_MAX, intreg,
				       0);
      break;
    }

  /* Pull the value out of the saved registers.  */

  addr = create_tmp_var (ptr_type_node, "addr");

  if (container)
    {
      int needed_intregs, needed_sseregs;
      bool need_temp;
      tree int_addr, sse_addr;

      lab_false = create_artificial_label (UNKNOWN_LOCATION);
      lab_over = create_artificial_label (UNKNOWN_LOCATION);

      examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);

      need_temp = (!REG_P (container)
		   && ((needed_intregs && TYPE_ALIGN (type) > 64)
		       || TYPE_ALIGN (type) > 128));

      /* In case we are passing a structure, verify that it is a consecutive
	 block on the register save area.  If not we need to do moves.  */
      if (!need_temp && !REG_P (container))
	{
	  /* Verify that all registers are strictly consecutive.  */
	  if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
	    {
	      int i;

	      for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
		{
		  rtx slot = XVECEXP (container, 0, i);
		  if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
		      || INTVAL (XEXP (slot, 1)) != i * 16)
		    need_temp = true;
		}
	    }
	  else
	    {
	      int i;

	      for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
		{
		  rtx slot = XVECEXP (container, 0, i);
		  if (REGNO (XEXP (slot, 0)) != (unsigned int) i
		      || INTVAL (XEXP (slot, 1)) != i * 8)
		    need_temp = true;
		}
	    }
	}
      if (!need_temp)
	{
	  int_addr = addr;
	  sse_addr = addr;
	}
      else
	{
	  int_addr = create_tmp_var (ptr_type_node, "int_addr");
	  sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
	}

      /* First ensure that we fit completely in registers.  */
      if (needed_intregs)
	{
	  t = build_int_cst (TREE_TYPE (gpr),
			     (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
	  t = build2 (GE_EXPR, boolean_type_node, gpr, t);
	  t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
	  t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
	  gimplify_and_add (t, pre_p);
	}
      if (needed_sseregs)
	{
	  t = build_int_cst (TREE_TYPE (fpr),
			     (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
			     + X86_64_REGPARM_MAX * 8);
	  t = build2 (GE_EXPR, boolean_type_node, fpr, t);
	  t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
	  t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
	  gimplify_and_add (t, pre_p);
	}

      /* Compute index to start of area used for integer regs.  */
      if (needed_intregs)
	{
	  /* int_addr = gpr + sav; */
	  t = fold_build_pointer_plus (sav, gpr);
	  gimplify_assign (int_addr, t, pre_p);
	}
      if (needed_sseregs)
	{
	  /* sse_addr = fpr + sav; */
	  t = fold_build_pointer_plus (sav, fpr);
	  gimplify_assign (sse_addr, t, pre_p);
	}
      if (need_temp)
	{
	  int i, prev_size = 0;
	  tree temp = create_tmp_var (type, "va_arg_tmp");

	  /* addr = &temp; */
	  t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
	  gimplify_assign (addr, t, pre_p);

	  for (i = 0; i < XVECLEN (container, 0); i++)
	    {
	      rtx slot = XVECEXP (container, 0, i);
	      rtx reg = XEXP (slot, 0);
	      enum machine_mode mode = GET_MODE (reg);
	      tree piece_type;
	      tree addr_type;
	      tree daddr_type;
	      tree src_addr, src;
	      int src_offset;
	      tree dest_addr, dest;
	      int cur_size = GET_MODE_SIZE (mode);

	      gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
	      prev_size = INTVAL (XEXP (slot, 1));
	      if (prev_size + cur_size > size)
		{
		  cur_size = size - prev_size;
		  mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
		  if (mode == BLKmode)
		    mode = QImode;
		}
	      piece_type = lang_hooks.types.type_for_mode (mode, 1);
	      if (mode == GET_MODE (reg))
		addr_type = build_pointer_type (piece_type);
	      else
		addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
							 true);
	      daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
							true);

	      if (SSE_REGNO_P (REGNO (reg)))
		{
		  src_addr = sse_addr;
		  src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
		}
	      else
		{
		  src_addr = int_addr;
		  src_offset = REGNO (reg) * 8;
		}
	      src_addr = fold_convert (addr_type, src_addr);
	      src_addr = fold_build_pointer_plus_hwi (src_addr, src_offset);

	      dest_addr = fold_convert (daddr_type, addr);
	      dest_addr = fold_build_pointer_plus_hwi (dest_addr, prev_size);
	      if (cur_size == GET_MODE_SIZE (mode))
		{
		  src = build_va_arg_indirect_ref (src_addr);
		  dest = build_va_arg_indirect_ref (dest_addr);

		  gimplify_assign (dest, src, pre_p);
		}
	      else
		{
		  tree copy
		    = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				       3, dest_addr, src_addr,
				       size_int (cur_size));
		  gimplify_and_add (copy, pre_p);
		}
	      prev_size += cur_size;
	    }
	}

      if (needed_intregs)
	{
	  t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
		      build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
	  gimplify_assign (gpr, t, pre_p);
	}

      if (needed_sseregs)
	{
	  t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
		      build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
	  gimplify_assign (fpr, t, pre_p);
	}

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
    }

  /* ... otherwise out of the overflow area.  */

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
  if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  /* Care for on-stack alignment if needed.  */
  if (arg_boundary <= 64 || size == 0)
    t = ovf;
  else
    {
      HOST_WIDE_INT align = arg_boundary / 8;
      t = fold_build_pointer_plus_hwi (ovf, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
  gimplify_assign (addr, t, pre_p);

  t = fold_build_pointer_plus_hwi (t, rsize * UNITS_PER_WORD);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (container)
    gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));

  ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  addr = fold_convert (ptrtype, addr);

  if (indirect_p)
    addr = build_va_arg_indirect_ref (addr);
  return build_va_arg_indirect_ref (addr);
}
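/* Worked example (our annotation, hedged): for "x = va_arg (ap, int)"
   the GIMPLE built above corresponds roughly to

	if (ap->gp_offset >= 48) goto overflow;	// (6 - 1 + 1) * 8
	addr = ap->reg_save_area + ap->gp_offset;
	ap->gp_offset += 8;
	goto done;
     overflow:
	addr = ap->overflow_arg_area;
	ap->overflow_arg_area += 8;
     done:
	x = *(int *) addr;

   This sketch ignores on-stack alignment and the need_temp repacking
   path for multi-register aggregates.  */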
/* Return true if OPNUM's MEM should be matched
   in movabs* patterns.  */

bool
ix86_check_movabs (rtx insn, int opnum)
{
  rtx set, mem;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = XEXP (set, opnum);
  while (GET_CODE (mem) == SUBREG)
    mem = SUBREG_REG (mem);
  gcc_assert (MEM_P (mem));
  return volatile_ok || !MEM_VOLATILE_P (mem);
}
/* Initialize the table of extra 80387 mathematical constants.  */

static void
init_ext_80387_constants (void)
{
  static const char * cst[5] =
  {
    "0.3010299956639811952256464283594894482",  /* 0: fldlg2  */
    "0.6931471805599453094286904741849753009",  /* 1: fldln2  */
    "1.4426950408889634073876517827983434472",  /* 2: fldl2e  */
    "3.3219280948873623478083405569094566090",  /* 3: fldl2t  */
    "3.1415926535897932385128089594061862044",  /* 4: fldpi  */
  };
  int i;

  for (i = 0; i < 5; i++)
    {
      real_from_string (&ext_80387_constants_table[i], cst[i]);
      /* Ensure each constant is rounded to XFmode precision.  */
      real_convert (&ext_80387_constants_table[i],
		    XFmode, &ext_80387_constants_table[i]);
    }

  ext_80387_constants_init = 1;
}
/* Return non-zero if the constant is something that
   can be loaded with a special instruction.  */

int
standard_80387_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);

  REAL_VALUE_TYPE r;

  if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
    return -1;

  if (x == CONST0_RTX (mode))
    return 1;
  if (x == CONST1_RTX (mode))
    return 2;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* For XFmode constants, try to find a special 80387 instruction when
     optimizing for size or on those CPUs that benefit from them.  */
  if (mode == XFmode
      && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
    {
      int i;

      if (! ext_80387_constants_init)
	init_ext_80387_constants ();

      for (i = 0; i < 5; i++)
	if (real_identical (&r, &ext_80387_constants_table[i]))
	  return i + 3;
    }

  /* Load of the constant -0.0 or -1.0 will be split as
     fldz;fchs or fld1;fchs sequence.  */
  if (real_isnegzero (&r))
    return 8;
  if (real_identical (&r, &dconstm1))
    return 9;

  return 0;
}
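/* Summary of the return-value convention above (our annotation): 0 means
   "not special"; 1 and 2 select fldz and fld1; 3..7 select fldlg2,
   fldln2, fldl2e, fldl2t and fldpi via the table in
   init_ext_80387_constants; 8 and 9 mark values that are assumed to be
   split into the fldz;fchs and fld1;fchs sequences.  */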
/* Return the opcode of the special instruction to be used to load
   the constant X.  */

const char *
standard_80387_constant_opcode (rtx x)
{
  switch (standard_80387_constant_p (x))
    {
    case 1:
      return "fldz";
    case 2:
      return "fld1";
    case 3:
      return "fldlg2";
    case 4:
      return "fldln2";
    case 5:
      return "fldl2e";
    case 6:
      return "fldl2t";
    case 7:
      return "fldpi";
    case 8:
    case 9:
      return "#";
    default:
      gcc_unreachable ();
    }
}

/* Return the CONST_DOUBLE representing the 80387 constant that is
   loaded by the specified special instruction.  The argument IDX
   matches the return value from standard_80387_constant_p.  */

rtx
standard_80387_constant_rtx (int idx)
{
  int i;

  if (! ext_80387_constants_init)
    init_ext_80387_constants ();

  switch (idx)
    {
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
      i = idx - 3;
      break;

    default:
      gcc_unreachable ();
    }

  return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
				       XFmode);
}
/* Return 1 if X is all 0s and 2 if X is all 1s
   in supported SSE/AVX vector mode.  */

int
standard_sse_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);

  if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
    return 1;
  if (vector_all_ones_operand (x, mode))
    switch (mode)
      {
      case V16QImode:
      case V8HImode:
      case V4SImode:
      case V2DImode:
	if (TARGET_SSE2)
	  return 2;
      case V32QImode:
      case V16HImode:
      case V8SImode:
      case V4DImode:
	if (TARGET_AVX2)
	  return 2;
      default:
	break;
      }

  return 0;
}

/* Return the opcode of the special instruction to be used to load
   the constant X.  */

const char *
standard_sse_constant_opcode (rtx insn, rtx x)
{
  switch (standard_sse_constant_p (x))
    {
    case 1:
      switch (get_attr_mode (insn))
	{
	case MODE_TI:
	  return "%vpxor\t%0, %d0";
	case MODE_V2DF:
	  return "%vxorpd\t%0, %d0";
	case MODE_V4SF:
	  return "%vxorps\t%0, %d0";

	case MODE_OI:
	  return "vpxor\t%x0, %x0, %x0";
	case MODE_V4DF:
	  return "vxorpd\t%x0, %x0, %x0";
	case MODE_V8SF:
	  return "vxorps\t%x0, %x0, %x0";

	default:
	  break;
	}

    case 2:
      if (TARGET_AVX)
	return "vpcmpeqd\t%0, %0, %0";
      else
	return "pcmpeqd\t%0, %0";

    default:
      break;
    }
  gcc_unreachable ();
}
/* Returns true if OP contains a symbol reference.  */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return true;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}
/* Return true if it is appropriate to emit `ret' instructions in the
   body of a function.  Do this only if the epilogue is simple, needing a
   couple of insns.  Prior to reloading, we can't tell how many registers
   must be saved, so return false then.  Return false if there is no frame
   marker to de-allocate.  */

bool
ix86_can_use_return_insn_p (void)
{
  struct ix86_frame frame;

  if (! reload_completed || frame_pointer_needed)
    return 0;

  /* Don't allow more than 32k pop, since that's all we can do
     with one instruction.  */
  if (crtl->args.pops_args && crtl->args.size >= 32768)
    return 0;

  ix86_compute_frame_layout (&frame);
  return (frame.stack_pointer_offset == UNITS_PER_WORD
	  && (frame.nregs + frame.nsseregs) == 0);
}
/* Value should be nonzero if functions must have frame pointers.
   Zero means the frame pointer need not be set up (and parms may
   be accessed via the stack pointer) in functions that seem suitable.  */

static bool
ix86_frame_pointer_required (void)
{
  /* If we accessed previous frames, then the generated code expects
     to be able to access the saved ebp value in our frame.  */
  if (cfun->machine->accesses_prev_frame)
    return true;

  /* Several x86 OSes need a frame pointer for other reasons,
     usually pertaining to setjmp.  */
  if (SUBTARGET_FRAME_POINTER_REQUIRED)
    return true;

  /* For older 32-bit runtimes setjmp requires valid frame-pointer.  */
  if (TARGET_32BIT_MS_ABI && cfun->calls_setjmp)
    return true;

  /* Win64 SEH, very large frames need a frame-pointer as maximum stack
     allocation is 4GB.  */
  if (TARGET_64BIT_MS_ABI && get_frame_size () > SEH_MAX_FRAME_SIZE)
    return true;

  /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
     turns off the frame pointer by default.  Turn it back on now if
     we've not got a leaf function.  */
  if (TARGET_OMIT_LEAF_FRAME_POINTER
      && (!crtl->is_leaf
	  || ix86_current_function_calls_tls_descriptor))
    return true;

  if (crtl->profile && !flag_fentry)
    return true;

  return false;
}
/* Record that the current function accesses previous call frames.  */

void
ix86_setup_frame_addresses (void)
{
  cfun->machine->accesses_prev_frame = 1;
}

#ifndef USE_HIDDEN_LINKONCE
# if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
#  define USE_HIDDEN_LINKONCE 1
# else
#  define USE_HIDDEN_LINKONCE 0
# endif
#endif

static int pic_labels_used;
/* Fills in the label name that should be used for a pc thunk for
   the given register.  */

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  gcc_assert (!TARGET_64BIT);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__x86.get_pc_thunk.%s", reg_names[regno]);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
}
/* This function generates code for -fpic that loads %ebx with
   the return address of the caller and then returns.  */

static void
ix86_code_end (void)
{
  rtx xops[2];
  int regno;

  for (regno = AX_REG; regno <= SP_REG; regno++)
    {
      char name[32];
      tree decl;

      if (!(pic_labels_used & (1 << regno)))
	continue;

      get_pc_thunk_name (name, regno);

      decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			 get_identifier (name),
			 build_function_type_list (void_type_node, NULL_TREE));
      DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				       NULL_TREE, void_type_node);
      TREE_PUBLIC (decl) = 1;
      TREE_STATIC (decl) = 1;
      DECL_IGNORED_P (decl) = 1;

#if TARGET_MACHO
      if (TARGET_MACHO)
	{
	  switch_to_section (darwin_sections[text_coal_section]);
	  fputs ("\t.weak_definition\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  fputs ("\n\t.private_extern\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  putc ('\n', asm_out_file);
	  ASM_OUTPUT_LABEL (asm_out_file, name);
	  DECL_WEAK (decl) = 1;
	}
      else
#endif
      if (USE_HIDDEN_LINKONCE)
	{
	  DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);

	  targetm.asm_out.unique_section (decl, 0);
	  switch_to_section (get_named_section (decl, NULL, 0));

	  targetm.asm_out.globalize_label (asm_out_file, name);
	  fputs ("\t.hidden\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  putc ('\n', asm_out_file);
	  ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
	}
      else
	{
	  switch_to_section (text_section);
	  ASM_OUTPUT_LABEL (asm_out_file, name);
	}

      DECL_INITIAL (decl) = make_node (BLOCK);
      current_function_decl = decl;
      init_function_start (decl);
      first_function_block_is_cold = false;
      /* Make sure unwind info is emitted for the thunk if needed.  */
      final_start_function (emit_barrier (), asm_out_file, 1);

      /* Pad stack IP move with 4 instructions (two NOPs count
	 as one instruction).  */
      if (TARGET_PAD_SHORT_FUNCTION)
	{
	  int i = 8;

	  while (i--)
	    fputs ("\tnop\n", asm_out_file);
	}

      xops[0] = gen_rtx_REG (Pmode, regno);
      xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
      output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
      fputs ("\tret\n", asm_out_file);
      final_end_function ();
      init_insn_lengths ();
      free_after_compilation (cfun);
      set_cfun (NULL);
      current_function_decl = NULL;
    }

  if (flag_split_stack)
    file_end_indicate_split_stack ();
}
/* Emit code for the SET_GOT patterns.  */

const char *
output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
{
  rtx xops[3];

  xops[0] = dest;

  if (TARGET_VXWORKS_RTP && flag_pic)
    {
      /* Load (*VXWORKS_GOTT_BASE) into the PIC register.  */
      xops[2] = gen_rtx_MEM (Pmode,
			     gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
      output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);

      /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
	 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
	 an unadorned address.  */
      xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
      SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
      output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
      return "";
    }

  xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);

  if (!flag_pic)
    {
      xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());

      output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);

#if TARGET_MACHO
      /* Output the Mach-O "canonical" label name ("Lxx$pb") here too.  This
	 is what will be referenced by the Mach-O PIC subsystem.  */
      if (!label)
	ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
#endif

      targetm.asm_out.internal_label (asm_out_file, "L",
				      CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
    }
  else
    {
      char name[32];
      get_pc_thunk_name (name, REGNO (dest));
      pic_labels_used |= 1 << REGNO (dest);

      xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      xops[2] = gen_rtx_MEM (QImode, xops[2]);
      output_asm_insn ("call\t%X2", xops);

#if TARGET_MACHO
      /* Output the Mach-O "canonical" label name ("Lxx$pb") here too.  This
	 is what will be referenced by the Mach-O PIC subsystem.  */
      if (!label)
	ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
      else
	targetm.asm_out.internal_label (asm_out_file, "L",
					CODE_LABEL_NUMBER (label));
#endif
    }

  if (!TARGET_MACHO)
    output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);

  return "";
}
/* Generate a "push" pattern for input ARG.  */

static rtx
gen_push (rtx arg)
{
  struct machine_function *m = cfun->machine;

  if (m->fs.cfa_reg == stack_pointer_rtx)
    m->fs.cfa_offset += UNITS_PER_WORD;
  m->fs.sp_offset += UNITS_PER_WORD;

  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (VOIDmode,
		      gen_rtx_MEM (word_mode,
				   gen_rtx_PRE_DEC (Pmode,
						    stack_pointer_rtx)),
		      arg);
}

/* Generate a "pop" pattern for input ARG.  */

static rtx
gen_pop (rtx arg)
{
  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (VOIDmode,
		      arg,
		      gen_rtx_MEM (word_mode,
				   gen_rtx_POST_INC (Pmode,
						     stack_pointer_rtx)));
}
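/* For illustration (our annotation): on 64-bit targets gen_push (reg)
   yields RTL of the shape

	(set (mem:DI (pre_dec:DI (reg:DI sp))) (reg:DI arg))

   which matches the push insn patterns, while gen_pop produces the
   mirror-image post_inc load.  */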
/* Return >= 0 if there is an unused call-clobbered register available
   for the entire function.  */

static unsigned int
ix86_select_alt_pic_regnum (void)
{
  if (crtl->is_leaf
      && !crtl->profile
      && !ix86_current_function_calls_tls_descriptor)
    {
      int i, drap;
      /* Can't use the same register for both PIC and DRAP.  */
      if (crtl->drap_reg)
	drap = REGNO (crtl->drap_reg);
      else
	drap = -1;
      for (i = 2; i >= 0; --i)
	if (i != drap && !df_regs_ever_live_p (i))
	  return i;
    }

  return INVALID_REGNUM;
}
/* Return TRUE if we need to save REGNO.  */

static bool
ix86_save_reg (unsigned int regno, bool maybe_eh_return)
{
  if (pic_offset_table_rtx
      && regno == REAL_PIC_OFFSET_TABLE_REGNUM
      && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
	  || crtl->profile
	  || crtl->calls_eh_return
	  || crtl->uses_const_pool))
    return ix86_select_alt_pic_regnum () == INVALID_REGNUM;

  if (crtl->calls_eh_return && maybe_eh_return)
    {
      unsigned i;
      for (i = 0; ; i++)
	{
	  unsigned test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
    return true;

  return (df_regs_ever_live_p (regno)
	  && !call_used_regs[regno]
	  && !fixed_regs[regno]
	  && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
}
/* Return number of saved general purpose registers.  */

static int
ix86_nsaved_regs (void)
{
  int nregs = 0;
  int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      nregs ++;
  return nregs;
}

/* Return number of saved SSE registers.  */

static int
ix86_nsaved_sseregs (void)
{
  int nregs = 0;
  int regno;

  if (!TARGET_64BIT_MS_ABI)
    return 0;
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      nregs ++;
  return nregs;
}
/* Given FROM and TO register numbers, say whether this elimination is
   allowed.  If stack alignment is needed, we can only replace argument
   pointer with hard frame pointer, or replace frame pointer with stack
   pointer.  Otherwise, frame pointer elimination is automatically
   handled and all other eliminations are valid.  */

static bool
ix86_can_eliminate (const int from, const int to)
{
  if (stack_realign_fp)
    return ((from == ARG_POINTER_REGNUM
	     && to == HARD_FRAME_POINTER_REGNUM)
	    || (from == FRAME_POINTER_REGNUM
		&& to == STACK_POINTER_REGNUM));
  else
    return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
}
/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

HOST_WIDE_INT
ix86_initial_elimination_offset (int from, int to)
{
  struct ix86_frame frame;
  ix86_compute_frame_layout (&frame);

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset;
  else if (from == FRAME_POINTER_REGNUM
	   && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
  else
    {
      gcc_assert (to == STACK_POINTER_REGNUM);

      if (from == ARG_POINTER_REGNUM)
	return frame.stack_pointer_offset;

      gcc_assert (from == FRAME_POINTER_REGNUM);
      return frame.stack_pointer_offset - frame.frame_pointer_offset;
    }
}
/* In a dynamically-aligned function, we can't know the offset from
   stack pointer to frame pointer, so we must ensure that setjmp
   eliminates fp against the hard fp (%ebp) rather than trying to
   index from %esp up to the top of the frame across a gap that is
   of unknown (at compile-time) size.  */
static rtx
ix86_builtin_setjmp_frame_value (void)
{
  return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
}

/* When using -fsplit-stack, the allocation routines set a field in
   the TCB to the bottom of the stack plus this much space, measured
   in bytes.  */

#define SPLIT_STACK_AVAILABLE 256

/* Fill structure ix86_frame about frame of currently computed function.  */
static void
ix86_compute_frame_layout (struct ix86_frame *frame)
{
  unsigned HOST_WIDE_INT stack_alignment_needed;
  HOST_WIDE_INT offset;
  unsigned HOST_WIDE_INT preferred_alignment;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT to_allocate;

  frame->nregs = ix86_nsaved_regs ();
  frame->nsseregs = ix86_nsaved_sseregs ();

  stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
  preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;

  /* 64-bit MS ABI seems to require stack alignment to be always 16 except for
     function prologues and leaf.  */
  if ((TARGET_64BIT_MS_ABI && preferred_alignment < 16)
      && (!crtl->is_leaf || cfun->calls_alloca != 0
	  || ix86_current_function_calls_tls_descriptor))
    {
      preferred_alignment = 16;
      stack_alignment_needed = 16;
      crtl->preferred_stack_boundary = 128;
      crtl->stack_alignment_needed = 128;
    }

  gcc_assert (!size || stack_alignment_needed);
  gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
  gcc_assert (preferred_alignment <= stack_alignment_needed);

  /* For SEH we have to limit the amount of code movement into the prologue.
     At present we do this via a BLOCKAGE, at which point there's very little
     scheduling that can be done, which means that there's very little point
     in doing anything except PUSHs.  */
  if (TARGET_SEH)
    cfun->machine->use_fast_prologue_epilogue = false;

  /* During reload iteration the amount of registers saved can change.
     Recompute the value as needed.  Do not recompute when amount of registers
     didn't change as reload does multiple calls to the function and does not
     expect the decision to change within single iteration.  */
  else if (!optimize_function_for_size_p (cfun)
	   && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
    {
      int count = frame->nregs;
      struct cgraph_node *node = cgraph_get_node (current_function_decl);

      cfun->machine->use_fast_prologue_epilogue_nregs = count;

      /* The fast prologue uses move instead of push to save registers.  This
	 is significantly longer, but also executes faster as modern hardware
	 can execute the moves in parallel, but can't do that for push/pop.

	 Be careful about choosing what prologue to emit:  When function takes
	 many instructions to execute we may use slow version as well as in
	 case function is known to be outside hot spot (this is known with
	 feedback only).  Weight the size of function by number of registers
	 to save as it is cheap to use one or two push instructions but very
	 slow to use many of them.  */
      if (count)
	count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
      if (node->frequency < NODE_FREQUENCY_NORMAL
	  || (flag_branch_probabilities
	      && node->frequency < NODE_FREQUENCY_HOT))
	cfun->machine->use_fast_prologue_epilogue = false;
      else
	cfun->machine->use_fast_prologue_epilogue
	  = !expensive_function_p (count);
    }

  frame->save_regs_using_mov
    = (TARGET_PROLOGUE_USING_MOVE && cfun->machine->use_fast_prologue_epilogue
       /* If static stack checking is enabled and done with probes,
	  the registers need to be saved before allocating the frame.  */
       && flag_stack_check != STATIC_BUILTIN_STACK_CHECK);

  /* Skip return address.  */
  offset = UNITS_PER_WORD;

  /* Skip pushed static chain.  */
  if (ix86_static_chain_on_stack)
    offset += UNITS_PER_WORD;

  /* Skip saved base pointer.  */
  if (frame_pointer_needed)
    offset += UNITS_PER_WORD;
  frame->hfp_save_offset = offset;

  /* The traditional frame pointer location is at the top of the frame.  */
  frame->hard_frame_pointer_offset = offset;

  /* Register save area */
  offset += frame->nregs * UNITS_PER_WORD;
  frame->reg_save_offset = offset;

  /* On SEH target, registers are pushed just before the frame pointer
     location.  */
  if (TARGET_SEH)
    frame->hard_frame_pointer_offset = offset;

  /* Align and set SSE register save area.  */
  if (frame->nsseregs)
    {
      /* The only ABI that has saved SSE registers (Win64) also has a
	 16-byte aligned default stack, and thus we don't need to be
	 within the re-aligned local stack frame to save them.  */
      gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
      offset = (offset + 16 - 1) & -16;
      offset += frame->nsseregs * 16;
    }
  frame->sse_reg_save_offset = offset;

  /* The re-aligned stack starts here.  Values before this point are not
     directly comparable with values below this point.  In order to make
     sure that no value happens to be the same before and after, force
     the alignment computation below to add a non-zero value.  */
  if (stack_realign_fp)
    offset = (offset + stack_alignment_needed) & -stack_alignment_needed;

  /* Va-arg area */
  frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
  offset += frame->va_arg_size;

  /* Align start of frame for local function.  */
  if (stack_realign_fp
      || offset != frame->sse_reg_save_offset
      || size != 0
      || !crtl->is_leaf
      || cfun->calls_alloca
      || ix86_current_function_calls_tls_descriptor)
    offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;

  /* Frame pointer points here.  */
  frame->frame_pointer_offset = offset;

  offset += size;

  /* Add outgoing arguments area.  Can be skipped if we eliminated
     all the function calls as dead code.
     Skipping is however impossible when function calls alloca.  Alloca
     expander assumes that last crtl->outgoing_args_size
     of stack frame are unused.  */
  if (ACCUMULATE_OUTGOING_ARGS
      && (!crtl->is_leaf || cfun->calls_alloca
	  || ix86_current_function_calls_tls_descriptor))
    {
      offset += crtl->outgoing_args_size;
      frame->outgoing_arguments_size = crtl->outgoing_args_size;
    }
  else
    frame->outgoing_arguments_size = 0;

  /* Align stack boundary.  Only needed if we're calling another function
     or using alloca.  */
  if (!crtl->is_leaf || cfun->calls_alloca
      || ix86_current_function_calls_tls_descriptor)
    offset = (offset + preferred_alignment - 1) & -preferred_alignment;

  /* We've reached end of stack frame.  */
  frame->stack_pointer_offset = offset;

  /* Size prologue needs to allocate.  */
  to_allocate = offset - frame->sse_reg_save_offset;

  if ((!to_allocate && frame->nregs <= 1)
      || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
    frame->save_regs_using_mov = false;

  if (ix86_using_red_zone ()
      && crtl->sp_is_unchanging
      && crtl->is_leaf
      && !ix86_current_function_calls_tls_descriptor)
    {
      frame->red_zone_size = to_allocate;
      if (frame->save_regs_using_mov)
	frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
      if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
	frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
    }
  else
    frame->red_zone_size = 0;
  frame->stack_pointer_offset -= frame->red_zone_size;

  /* The SEH frame pointer location is near the bottom of the frame.
     This is enforced by the fact that the difference between the
     stack pointer and the frame pointer is limited to 240 bytes in
     the unwind data structure.  */
  if (TARGET_SEH)
    {
      HOST_WIDE_INT diff;

      /* If we can leave the frame pointer where it is, do so.  Also, returns
	 the establisher frame for __builtin_frame_address (0).  */
      diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
      if (diff <= SEH_MAX_FRAME_SIZE
	  && (diff > 240 || (diff & 15) != 0)
	  && !crtl->accesses_prior_frames)
	{
	  /* Ideally we'd determine what portion of the local stack frame
	     (within the constraint of the lowest 240) is most heavily used.
	     But without that complication, simply bias the frame pointer
	     by 128 bytes so as to maximize the amount of the local stack
	     frame that is addressable with 8-bit offsets.  */
	  frame->hard_frame_pointer_offset
	    = frame->stack_pointer_offset - 128;
	}
    }
}
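/* Illustrative layout (our annotation; offsets measured away from the
   incoming CFA, assuming no SEH and no stack realignment):

	return address				offset = UNITS_PER_WORD
	[pushed static chain, saved %ebp]	-> hfp_save_offset
	GP register save area			-> reg_save_offset
	[16-aligned SSE save area, Win64 only]	-> sse_reg_save_offset
	varargs register save area		(va_arg_size)
	local variables				-> frame_pointer_offset
	outgoing arguments			-> stack_pointer_offset
	[red zone, subtracted at the end]

   This mirrors the order in which OFFSET is accumulated above.  */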
/* This is semi-inlined memory_address_length, but simplified
   since we know that we're always dealing with reg+offset, and
   to avoid having to create and discard all that rtl.  */

static inline unsigned int
choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
{
  unsigned int len;

  if (offset == 0)
    {
      /* EBP and R13 cannot be encoded without an offset.  */
      len = (regno == BP_REG || regno == R13_REG);
    }
  else if (IN_RANGE (offset, -128, 127))
    len = 1;
  else
    len = 4;

  /* ESP and R12 must be encoded with a SIB byte.  */
  if (regno == SP_REG || regno == R12_REG)
    len++;

  return len;
}
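/* A few worked encodings (our annotation, following the rules above):
   (%rax) needs 0 extra bytes, 16(%rax) needs 1 (disp8), 1024(%rax) needs
   4 (disp32), (%rbp) needs 1 (a zero disp8 must still be encoded), and
   (%rsp) needs 1 for the mandatory SIB byte, so 16(%rsp) needs 2.  */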
/* Return an RTX that points to CFA_OFFSET within the stack frame.
   The valid base registers are taken from CFUN->MACHINE->FS.  */

static rtx
choose_baseaddr (HOST_WIDE_INT cfa_offset)
{
  const struct machine_function *m = cfun->machine;
  rtx base_reg = NULL;
  HOST_WIDE_INT base_offset = 0;

  if (m->use_fast_prologue_epilogue)
    {
      /* Choose the base register most likely to allow the most scheduling
	 opportunities.  Generally FP is valid throughout the function,
	 while DRAP must be reloaded within the epilogue.  But choose either
	 over the SP due to increased encoding size.  */

      if (m->fs.fp_valid)
	{
	  base_reg = hard_frame_pointer_rtx;
	  base_offset = m->fs.fp_offset - cfa_offset;
	}
      else if (m->fs.drap_valid)
	{
	  base_reg = crtl->drap_reg;
	  base_offset = 0 - cfa_offset;
	}
      else if (m->fs.sp_valid)
	{
	  base_reg = stack_pointer_rtx;
	  base_offset = m->fs.sp_offset - cfa_offset;
	}
    }
  else
    {
      HOST_WIDE_INT toffset;
      unsigned int len = 16, tlen = 16;

      /* Choose the base register with the smallest address encoding.
	 With a tie, choose FP > DRAP > SP.  */
      if (m->fs.sp_valid)
	{
	  base_reg = stack_pointer_rtx;
	  base_offset = m->fs.sp_offset - cfa_offset;
	  len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
	}
      if (m->fs.drap_valid)
	{
	  toffset = 0 - cfa_offset;
	  tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
	  if (tlen <= len)
	    {
	      base_reg = crtl->drap_reg;
	      base_offset = toffset;
	      len = tlen;
	    }
	}
      if (m->fs.fp_valid)
	{
	  toffset = m->fs.fp_offset - cfa_offset;
	  tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
	  if (tlen <= len)
	    {
	      base_reg = hard_frame_pointer_rtx;
	      base_offset = toffset;
	      len = tlen;
	    }
	}
    }
  gcc_assert (base_reg != NULL);

  return plus_constant (Pmode, base_reg, base_offset);
}
/* Emit code to save registers in the prologue.  */

static void
ix86_emit_save_regs (void)
{
  unsigned int regno;
  rtx insn;

  for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	insn = emit_insn (gen_push (gen_rtx_REG (word_mode, regno)));
	RTX_FRAME_RELATED_P (insn) = 1;
      }
}
9271 ix86_emit_save_reg_using_mov (enum machine_mode mode
, unsigned int regno
,
9272 HOST_WIDE_INT cfa_offset
)
9274 struct machine_function
*m
= cfun
->machine
;
9275 rtx reg
= gen_rtx_REG (mode
, regno
);
9276 rtx mem
, addr
, base
, insn
;
9278 addr
= choose_baseaddr (cfa_offset
);
9279 mem
= gen_frame_mem (mode
, addr
);
9281 /* For SSE saves, we need to indicate the 128-bit alignment. */
9282 set_mem_align (mem
, GET_MODE_ALIGNMENT (mode
));
9284 insn
= emit_move_insn (mem
, reg
);
9285 RTX_FRAME_RELATED_P (insn
) = 1;
9288 if (GET_CODE (base
) == PLUS
)
9289 base
= XEXP (base
, 0);
9290 gcc_checking_assert (REG_P (base
));
9292 /* When saving registers into a re-aligned local stack frame, avoid
9293 any tricky guessing by dwarf2out. */
9294 if (m
->fs
.realigned
)
9296 gcc_checking_assert (stack_realign_drap
);
9298 if (regno
== REGNO (crtl
->drap_reg
))
9300 /* A bit of a hack. We force the DRAP register to be saved in
9301 the re-aligned stack frame, which provides us with a copy
9302 of the CFA that will last past the prologue. Install it. */
9303 gcc_checking_assert (cfun
->machine
->fs
.fp_valid
);
9304 addr
= plus_constant (Pmode
, hard_frame_pointer_rtx
,
9305 cfun
->machine
->fs
.fp_offset
- cfa_offset
);
9306 mem
= gen_rtx_MEM (mode
, addr
);
9307 add_reg_note (insn
, REG_CFA_DEF_CFA
, mem
);
9311 /* The frame pointer is a stable reference within the
9312 aligned frame. Use it. */
9313 gcc_checking_assert (cfun
->machine
->fs
.fp_valid
);
9314 addr
= plus_constant (Pmode
, hard_frame_pointer_rtx
,
9315 cfun
->machine
->fs
.fp_offset
- cfa_offset
);
9316 mem
= gen_rtx_MEM (mode
, addr
);
9317 add_reg_note (insn
, REG_CFA_EXPRESSION
,
9318 gen_rtx_SET (VOIDmode
, mem
, reg
));
9322 /* The memory may not be relative to the current CFA register,
9323 which means that we may need to generate a new pattern for
9324 use by the unwind info. */
9325 else if (base
!= m
->fs
.cfa_reg
)
9327 addr
= plus_constant (Pmode
, m
->fs
.cfa_reg
,
9328 m
->fs
.cfa_offset
- cfa_offset
);
9329 mem
= gen_rtx_MEM (mode
, addr
);
9330 add_reg_note (insn
, REG_CFA_OFFSET
, gen_rtx_SET (VOIDmode
, mem
, reg
));
/* Emit code to save registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */
static void
ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	ix86_emit_save_reg_using_mov (word_mode, regno, cfa_offset);
	cfa_offset -= UNITS_PER_WORD;
      }
}

/* Emit code to save SSE registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */
static void
ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
	cfa_offset -= 16;
      }
}

static GTY(()) rtx queued_cfa_restores;
/* Add a REG_CFA_RESTORE REG note to INSN or queue them until next stack
   manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
   Don't add the note if the previously saved value will be left untouched
   within stack red-zone till return, as unwinders can find the same value
   in the register and on the stack.  */

static void
ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
{
  if (!crtl->shrink_wrapped
      && cfa_offset <= cfun->machine->fs.red_zone_offset)
    return;

  if (insn)
    {
      add_reg_note (insn, REG_CFA_RESTORE, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    queued_cfa_restores
      = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
}

/* Add queued REG_CFA_RESTORE notes if any to INSN.  */

static void
ix86_add_queued_cfa_restore_notes (rtx insn)
{
  rtx last;
  if (!queued_cfa_restores)
    return;
  for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
    ;
  XEXP (last, 1) = REG_NOTES (insn);
  REG_NOTES (insn) = queued_cfa_restores;
  queued_cfa_restores = NULL_RTX;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Expand prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory accesses.
   STYLE should be negative if instructions should be marked as frame related,
   zero if %r11 register is live and cannot be freely used and positive
   otherwise.  */

static void
pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
			   int style, bool set_cfa)
{
  struct machine_function *m = cfun->machine;
  rtx insn;
  bool add_frame_related_expr = false;

  if (Pmode == SImode)
    insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
  else if (x86_64_immediate_operand (offset, DImode))
    insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
  else
    {
      rtx tmp;
      /* r11 is used by indirect sibcall return as well, set before the
	 epilogue and used after the epilogue.  */
      if (style)
	tmp = gen_rtx_REG (DImode, R11_REG);
      else
	{
	  gcc_assert (src != hard_frame_pointer_rtx
		      && dest != hard_frame_pointer_rtx);
	  tmp = hard_frame_pointer_rtx;
	}
      insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
      if (style < 0)
	add_frame_related_expr = true;

      insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
    }

  insn = emit_insn (insn);
  if (style >= 0)
    ix86_add_queued_cfa_restore_notes (insn);

  if (set_cfa)
    {
      rtx r;

      gcc_assert (m->fs.cfa_reg == src);
      m->fs.cfa_offset += INTVAL (offset);
      m->fs.cfa_reg = dest;

      r = gen_rtx_PLUS (Pmode, src, offset);
      r = gen_rtx_SET (VOIDmode, dest, r);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (style < 0)
    {
      RTX_FRAME_RELATED_P (insn) = 1;
      if (add_frame_related_expr)
	{
	  rtx r = gen_rtx_PLUS (Pmode, src, offset);
	  r = gen_rtx_SET (VOIDmode, dest, r);
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
	}
    }

  if (dest == stack_pointer_rtx)
    {
      HOST_WIDE_INT ooffset = m->fs.sp_offset;
      bool valid = m->fs.sp_valid;

      if (src == hard_frame_pointer_rtx)
	{
	  valid = m->fs.fp_valid;
	  ooffset = m->fs.fp_offset;
	}
      else if (src == crtl->drap_reg)
	{
	  valid = m->fs.drap_valid;
	  ooffset = 0;
	}
      else
	{
	  /* Else there are two possibilities: SP itself, which we set
	     up as the default above.  Or EH_RETURN_STACKADJ_RTX, which is
	     taken care of by hand along the eh_return path.  */
	  gcc_checking_assert (src == stack_pointer_rtx
			       || offset == const0_rtx);
	}

      m->fs.sp_offset = ooffset - INTVAL (offset);
      m->fs.sp_valid = valid;
    }
}
/* Find an available register to be used as dynamic realign argument
   pointer register.  Such a register will be written in prologue and
   used in begin of body, so it must not be
	1. parameter passing register.
	2. GOT pointer.
   We reuse static-chain register if it is available.  Otherwise, we
   use DI for i386 and R13 for x86-64.  We chose R13 since it has
   longer live range.

   Return: the regno of chosen register.  */

static unsigned int
find_drap_reg (void)
{
  tree decl = cfun->decl;

  if (TARGET_64BIT)
    {
      /* Use R13 for nested functions or functions that need a static
	 chain.  Since a function with a tail call may use any
	 caller-saved registers in the epilogue, DRAP must not use
	 a caller-saved register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return R13_REG;

      return R10_REG;
    }
  else
    {
      /* Use DI for nested functions or functions that need a static
	 chain.  Since a function with a tail call may use any
	 caller-saved registers in the epilogue, DRAP must not use
	 a caller-saved register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return DI_REG;

      /* Reuse static chain register if it isn't used for parameter
	 passing.  */
      if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2)
	{
	  unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (decl));
	  if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) == 0)
	    return CX_REG;
	}
      return DI_REG;
    }
}
/* Return minimum incoming stack alignment.  */

static unsigned int
ix86_minimum_incoming_stack_boundary (bool sibcall)
{
  unsigned int incoming_stack_boundary;

  /* Prefer the one specified at command line.  */
  if (ix86_user_incoming_stack_boundary)
    incoming_stack_boundary = ix86_user_incoming_stack_boundary;
  /* In 32bit, use MIN_STACK_BOUNDARY for incoming stack boundary
     if -mstackrealign is used, it isn't used for sibcall check and
     estimated stack alignment is 128bit.  */
  else if (!sibcall
	   && !TARGET_64BIT
	   && ix86_force_align_arg_pointer
	   && crtl->stack_alignment_estimated == 128)
    incoming_stack_boundary = MIN_STACK_BOUNDARY;
  else
    incoming_stack_boundary = ix86_default_incoming_stack_boundary;

  /* Incoming stack alignment can be changed on individual functions
     via force_align_arg_pointer attribute.  We use the smallest
     incoming stack boundary.  */
  if (incoming_stack_boundary > MIN_STACK_BOUNDARY
      && lookup_attribute (ix86_force_align_arg_pointer_string,
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    incoming_stack_boundary = MIN_STACK_BOUNDARY;

  /* The incoming stack frame has to be aligned at least at
     parm_stack_boundary.  */
  if (incoming_stack_boundary < crtl->parm_stack_boundary)
    incoming_stack_boundary = crtl->parm_stack_boundary;

  /* Stack at entrance of main is aligned by runtime.  We use the
     smallest incoming stack boundary.  */
  if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
      && DECL_NAME (current_function_decl)
      && MAIN_NAME_P (DECL_NAME (current_function_decl))
      && DECL_FILE_SCOPE_P (current_function_decl))
    incoming_stack_boundary = MAIN_STACK_BOUNDARY;

  return incoming_stack_boundary;
}
/* Update incoming stack boundary and estimated stack alignment.  */

static void
ix86_update_stack_boundary (void)
{
  ix86_incoming_stack_boundary
    = ix86_minimum_incoming_stack_boundary (false);

  /* x86_64 vararg needs 16byte stack alignment for register save
     area.  */
  if (TARGET_64BIT
      && cfun->stdarg
      && crtl->stack_alignment_estimated < 128)
    crtl->stack_alignment_estimated = 128;
}
/* Handle the TARGET_GET_DRAP_RTX hook.  Return NULL if no DRAP is
   needed or an rtx for DRAP otherwise.  */

static rtx
ix86_get_drap_rtx (void)
{
  if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
    crtl->need_drap = true;

  if (stack_realign_drap)
    {
      /* Assign DRAP to vDRAP and returns vDRAP */
      unsigned int regno = find_drap_reg ();
      rtx drap_vreg;
      rtx arg_ptr;
      rtx seq, insn;

      arg_ptr = gen_rtx_REG (Pmode, regno);
      crtl->drap_reg = arg_ptr;

      start_sequence ();
      drap_vreg = copy_to_reg (arg_ptr);
      seq = get_insns ();
      end_sequence ();

      insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
      if (!optimize)
	{
	  add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return drap_vreg;
    }
  else
    return NULL;
}

/* Handle the TARGET_INTERNAL_ARG_POINTER hook.  */

static rtx
ix86_internal_arg_pointer (void)
{
  return virtual_incoming_args_rtx;
}
struct scratch_reg {
  rtx reg;
  bool saved;
};

/* Return a short-lived scratch register for use on function entry.
   In 32-bit mode, it is valid only after the registers are saved
   in the prologue.  This register must be released by means of
   release_scratch_register_on_entry once it is dead.  */

static void
get_scratch_register_on_entry (struct scratch_reg *sr)
{
  int regno;

  sr->saved = false;

  if (TARGET_64BIT)
    {
      /* We always use R11 in 64-bit mode.  */
      regno = R11_REG;
    }
  else
    {
      tree decl = current_function_decl, fntype = TREE_TYPE (decl);
      bool fastcall_p
	= lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
      bool thiscall_p
	= lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
      bool static_chain_p = DECL_STATIC_CHAIN (decl);
      int regparm = ix86_function_regparm (fntype, decl);
      int drap_regno
	= crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;

      /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
	 for the static chain register.  */
      if ((regparm < 1 || (fastcall_p && !static_chain_p))
	  && drap_regno != AX_REG)
	regno = AX_REG;
      /* 'thiscall' sets regparm to 1, uses ecx for arguments and edx
	 for the static chain register.  */
      else if (thiscall_p && !static_chain_p && drap_regno != AX_REG)
	regno = AX_REG;
      else if (regparm < 2 && !thiscall_p && drap_regno != DX_REG)
	regno = DX_REG;
      /* ecx is the static chain register.  */
      else if (regparm < 3 && !fastcall_p && !thiscall_p
	       && !static_chain_p
	       && drap_regno != CX_REG)
	regno = CX_REG;
      else if (ix86_save_reg (BX_REG, true))
	regno = BX_REG;
      /* esi is the static chain register.  */
      else if (!(regparm == 3 && static_chain_p)
	       && ix86_save_reg (SI_REG, true))
	regno = SI_REG;
      else if (ix86_save_reg (DI_REG, true))
	regno = DI_REG;
      else
	{
	  regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
	  sr->saved = true;
	}
    }

  sr->reg = gen_rtx_REG (Pmode, regno);
  if (sr->saved)
    {
      rtx insn = emit_insn (gen_push (sr->reg));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Release a scratch register obtained from the preceding function.  */

static void
release_scratch_register_on_entry (struct scratch_reg *sr)
{
  if (sr->saved)
    {
      struct machine_function *m = cfun->machine;
      rtx x, insn = emit_insn (gen_pop (sr->reg));

      /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop.  */
      RTX_FRAME_RELATED_P (insn) = 1;
      x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
      x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
      m->fs.sp_offset -= UNITS_PER_WORD;
    }
}
9746 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
/* Emit code to adjust the stack pointer by SIZE bytes while probing it.  */

static void
ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
{
  /* We skip the probe for the first interval + a small dope of 4 words and
     probe that many bytes past the specified size to maintain a protection
     area at the bottom of the stack.  */
  const int dope = 4 * UNITS_PER_WORD;
  rtx size_rtx = GEN_INT (size), last;

  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  The run-time loop is made up of 11 insns in the
     generic case while the compile-time loop is made up of 3+2*(n-1) insns
     for n # of intervals.  */
  if (size <= 5 * PROBE_INTERVAL)
    {
      HOST_WIDE_INT i, adjust;
      bool first_probe = true;

      /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
         values of N from 1 until it exceeds SIZE.  If only one probe is
         needed, this will not generate any code.  Then adjust and probe
         to PROBE_INTERVAL + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        {
          if (first_probe)
            {
              adjust = 2 * PROBE_INTERVAL + dope;
              first_probe = false;
            }
          else
            adjust = PROBE_INTERVAL;

          emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                  plus_constant (Pmode, stack_pointer_rtx,
                                                 -adjust)));
          emit_stack_probe (stack_pointer_rtx);
        }

      if (first_probe)
        adjust = size + PROBE_INTERVAL + dope;
      else
        adjust = size + PROBE_INTERVAL - i;

      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              plus_constant (Pmode, stack_pointer_rtx,
                                             -adjust)));
      emit_stack_probe (stack_pointer_rtx);

      /* Adjust back to account for the additional first interval.  */
      last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                     plus_constant (Pmode, stack_pointer_rtx,
                                                    PROBE_INTERVAL + dope)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      struct scratch_reg sr;

      get_scratch_register_on_entry (&sr);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;

      /* Step 2: compute initial and final value of the loop counter.  */

      /* SP = SP_0 + PROBE_INTERVAL.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              plus_constant (Pmode, stack_pointer_rtx,
                                             - (PROBE_INTERVAL + dope))));

      /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE.  */
      emit_move_insn (sr.reg, GEN_INT (-rounded_size));
      emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
                              gen_rtx_PLUS (Pmode, sr.reg,
                                            stack_pointer_rtx)));

      /* Step 3: the loop

         while (SP != LAST_ADDR)
           {
             SP = SP + PROBE_INTERVAL
             probe at SP
           }

         adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
         values of N from 1 until it is equal to ROUNDED_SIZE.  */

      emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));

      /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
         assert at compile-time that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
        {
          emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                  plus_constant (Pmode, stack_pointer_rtx,
                                                 rounded_size - size)));
          emit_stack_probe (stack_pointer_rtx);
        }

      /* Adjust back to account for the additional first interval.  */
      last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                     plus_constant (Pmode, stack_pointer_rtx,
                                                    PROBE_INTERVAL + dope)));

      release_scratch_register_on_entry (&sr);
    }

  gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);

  /* Even if the stack pointer isn't the CFA register, we need to correctly
     describe the adjustments made to it, in particular differentiate the
     frame-related ones from the frame-unrelated ones.  */
  if (size > 0)
    {
      rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
      XVECEXP (expr, 0, 0)
        = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                       plus_constant (Pmode, stack_pointer_rtx, -size));
      XVECEXP (expr, 0, 1)
        = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                       plus_constant (Pmode, stack_pointer_rtx,
                                      PROBE_INTERVAL + dope + size));
      add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
      RTX_FRAME_RELATED_P (last) = 1;

      cfun->machine->fs.sp_offset += size;
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
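/* Sketch of the unrolled (small constant) case above, assuming a 64-bit
   target with PROBE_INTERVAL == 4096, UNITS_PER_WORD == 8 (so dope == 32)
   and size == 8192 (illustrative only):

	subq	$8224, %rsp	# 2*PROBE_INTERVAL + dope
	orq	$0, (%rsp)	# probe
	subq	$4096, %rsp
	orq	$0, (%rsp)	# probe
	addq	$4128, %rsp	# give back PROBE_INTERVAL + dope

   for a net adjustment of exactly 8192 bytes with every page touched.  */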
/* Adjust the stack pointer up to REG while probing it.  */

const char *
output_adjust_stack_and_probe (rtx reg)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if SP == LAST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
  fputs ("\tje\t", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* SP = SP + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (PROBE_INTERVAL);
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at SP.  */
  xops[1] = const0_rtx;
  output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);

  fprintf (asm_out_file, "\tjmp\t");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
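/* The loop printed above comes out roughly as (AT&T syntax, assuming a
   4096-byte PROBE_INTERVAL; the scratch register name is illustrative):

       .LPSRL0:
	cmpq	%r11, %rsp	# stop once SP reaches LAST_ADDR
	je	.LPSRE0
	subq	$4096, %rsp	# SP moves down one interval
	orq	$0, (%rsp)	# probe the newly exposed page
	jmp	.LPSRL0
       .LPSRE0:
*/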
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  The run-time loop is made up of 7 insns in the
     generic case while the compile-time loop is made up of n insns for n #
     of intervals.  */
  if (size <= 7 * PROBE_INTERVAL)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
         it exceeds SIZE.  If only one probe is needed, this will not
         generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                         -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size, last;
      struct scratch_reg sr;

      get_scratch_register_on_entry (&sr);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_OFFSET = FIRST.  */
      emit_move_insn (sr.reg, GEN_INT (-first));

      /* LAST_OFFSET = FIRST + ROUNDED_SIZE.  */
      last = first + rounded_size;

      /* Step 3: the loop

         while (TEST_ADDR != LAST_ADDR)
           {
             TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
             probe at TEST_ADDR
           }

         probes at FIRST + N * PROBE_INTERVAL for values of N from 1
         until it is equal to ROUNDED_SIZE.  */

      emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
         that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
        emit_stack_probe (plus_constant (Pmode,
                                         gen_rtx_PLUS (Pmode,
                                                       stack_pointer_rtx,
                                                       sr.reg),
                                         rounded_size - size));

      release_scratch_register_on_entry (&sr);
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
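/* Note the contrast with ix86_adjust_stack_and_probe above: that routine
   moves the stack pointer itself as it probes (the STACK_CHECK_MOVING_SP
   strategy), while this one leaves the stack pointer untouched and probes
   at negative offsets from it; e.g. with FIRST == 0 the first probe would
   be "orl $0, -4096(%esp)", assuming a 4096-byte PROBE_INTERVAL
   (illustrative).  */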
/* Probe a range of stack addresses from REG to END, inclusive.  These are
   offsets from the current stack pointer.  */

const char *
output_probe_stack_range (rtx reg, rtx end)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[3];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg;
  xops[1] = end;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
  fputs ("\tje\t", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (PROBE_INTERVAL);
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at TEST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  xops[2] = const0_rtx;
  output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);

  fprintf (asm_out_file, "\tjmp\t");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
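/* The loop printed by output_probe_stack_range looks roughly like this
   (AT&T syntax; register names are illustrative -- the scratch register
   holds a negative offset from the stack pointer):

       .LPSRL1:
	cmpl	%edx, %eax	# TEST_OFFSET == LAST_OFFSET?
	je	.LPSRE1
	subl	$4096, %eax	# advance by PROBE_INTERVAL
	orl	$0, (%esp,%eax)	# probe at SP + TEST_OFFSET
	jmp	.LPSRL1
       .LPSRE1:
*/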
/* Finalize stack_realign_needed flag, which will guide prologue/epilogue
   to be generated in correct form.  */

static void
ix86_finalize_stack_realign_flags (void)
{
  /* Check if stack realign is really needed after reload, and
     store the result in cfun.  */
  unsigned int incoming_stack_boundary
    = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
       ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
  unsigned int stack_realign
    = (incoming_stack_boundary
       < (crtl->is_leaf
          ? crtl->max_used_stack_slot_alignment
          : crtl->stack_alignment_needed));

  if (crtl->stack_realign_finalized)
    {
      /* After stack_realign_needed is finalized, we can no longer
         change it.  */
      gcc_assert (crtl->stack_realign_needed == stack_realign);
      return;
    }

  /* If the only reason for frame_pointer_needed is that we conservatively
     assumed stack realignment might be needed, but in the end nothing that
     needed the stack alignment had been spilled, clear frame_pointer_needed
     and say we don't need stack realignment.  */
  if (stack_realign
      && !crtl->need_drap
      && frame_pointer_needed
      && crtl->is_leaf
      && flag_omit_frame_pointer
      && crtl->sp_is_unchanging
      && !ix86_current_function_calls_tls_descriptor
      && !crtl->accesses_prior_frames
      && !cfun->calls_alloca
      && !crtl->calls_eh_return
      && !(flag_stack_check && STACK_CHECK_MOVING_SP)
      && !ix86_frame_pointer_required ()
      && get_frame_size () == 0
      && ix86_nsaved_sseregs () == 0
      && ix86_varargs_gpr_size + ix86_varargs_fpr_size == 0)
    {
      HARD_REG_SET set_up_by_prologue, prologue_used;
      basic_block bb;

      CLEAR_HARD_REG_SET (prologue_used);
      CLEAR_HARD_REG_SET (set_up_by_prologue);
      add_to_hard_reg_set (&set_up_by_prologue, Pmode, STACK_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue, Pmode, ARG_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue, Pmode,
                           HARD_FRAME_POINTER_REGNUM);
      FOR_EACH_BB (bb)
        {
          rtx insn;
          FOR_BB_INSNS (bb, insn)
            if (NONDEBUG_INSN_P (insn)
                && requires_stack_frame_p (insn, prologue_used,
                                           set_up_by_prologue))
              {
                crtl->stack_realign_needed = stack_realign;
                crtl->stack_realign_finalized = true;
                return;
              }
        }

      frame_pointer_needed = false;
      stack_realign = false;
      crtl->max_used_stack_slot_alignment = incoming_stack_boundary;
      crtl->stack_alignment_needed = incoming_stack_boundary;
      crtl->stack_alignment_estimated = incoming_stack_boundary;
      if (crtl->preferred_stack_boundary > incoming_stack_boundary)
        crtl->preferred_stack_boundary = incoming_stack_boundary;
      df_finish_pass (true);
      df_scan_alloc (NULL);
      df_scan_blocks ();
      df_compute_regs_ever_live (true);
      df_analyze ();
    }

  crtl->stack_realign_needed = stack_realign;
  crtl->stack_realign_finalized = true;
}
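/* Worked example of the computation above (illustrative): if the incoming
   stack boundary is 32 bits but a local variable pushes
   crtl->stack_alignment_needed up to 128 bits, stack_realign starts out
   true; the scan over the function body can then cancel both the
   realignment and the frame pointer when no insn actually requires the
   aligned frame.  */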
/* Expand the prologue into a bunch of separate insns.  */

void
ix86_expand_prologue (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn, t;
  bool pic_reg_used;
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  bool int_registers_saved;
  bool sse_registers_saved;

  ix86_finalize_stack_realign_flags ();

  /* DRAP should not coexist with stack_realign_fp */
  gcc_assert (!(crtl->drap_reg && stack_realign_fp));

  memset (&m->fs, 0, sizeof (m->fs));

  /* Initialize CFA state for before the prologue.  */
  m->fs.cfa_reg = stack_pointer_rtx;
  m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;

  /* Track SP offset to the CFA.  We continue tracking this after we've
     swapped the CFA register away from SP.  In the case of re-alignment
     this is fudged; we're interested in offsets within the local frame.  */
  m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
  m->fs.sp_valid = true;

  ix86_compute_frame_layout (&frame);

  if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
    {
      /* We should have already generated an error for any use of
         ms_hook on a nested function.  */
      gcc_checking_assert (!ix86_static_chain_on_stack);

      /* Check if profiling is active and we shall use the profiling
         before prologue variant.  If so, sorry.  */
      if (crtl->profile && flag_fentry != 0)
        sorry ("ms_hook_prologue attribute isn%'t compatible "
               "with -mfentry for 32-bit");

      /* In ix86_asm_output_function_label we emitted:
	 8b ff     movl.s %edi,%edi
	 55        push   %ebp
	 8b ec     movl.s %esp,%ebp

         This matches the hookable function prologue in Win32 API
         functions in Microsoft Windows XP Service Pack 2 and newer.
         Wine uses this to enable Windows apps to hook the Win32 API
         functions provided by Wine.

         What that means is that we've already set up the frame pointer.  */

      if (frame_pointer_needed
          && !(crtl->drap_reg && crtl->stack_realign_needed))
        {
          rtx push, mov;

          /* We've decided to use the frame pointer already set up.
             Describe this to the unwinder by pretending that both
             push and mov insns happen right here.

             Putting the unwind info here at the end of the ms_hook
             is done so that we can make absolutely certain we get
             the required byte sequence at the start of the function,
             rather than relying on an assembler that can produce
             the exact encoding required.

             However it does mean (in the unpatched case) that we have
             a 1 insn window where the asynchronous unwind info is
             incorrect.  However, if we placed the unwind info at
             its correct location we would have incorrect unwind info
             in the patched case.  Which is probably all moot since
             I don't expect Wine generates dwarf2 unwind info for the
             system libraries that use this feature.  */

          insn = emit_insn (gen_blockage ());

          push = gen_push (hard_frame_pointer_rtx);
          mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                             stack_pointer_rtx);
          RTX_FRAME_RELATED_P (push) = 1;
          RTX_FRAME_RELATED_P (mov) = 1;

          RTX_FRAME_RELATED_P (insn) = 1;
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));

          /* Note that gen_push incremented m->fs.cfa_offset, even
             though we didn't emit the push insn here.  */
          m->fs.cfa_reg = hard_frame_pointer_rtx;
          m->fs.fp_offset = m->fs.cfa_offset;
          m->fs.fp_valid = true;
        }
      else
        {
          /* The frame pointer is not needed so pop %ebp again.
             This leaves us with a pristine state.  */
          emit_insn (gen_pop (hard_frame_pointer_rtx));
        }
    }

  /* The first insn of a function that accepts its static chain on the
     stack is to push the register that would be filled in by a direct
     call.  This insn will be skipped by the trampoline.  */
  else if (ix86_static_chain_on_stack)
    {
      insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
      emit_insn (gen_blockage ());

      /* We don't want to interpret this push insn as a register save,
         only as a stack adjustment.  The real copy of the register as
         a save will be done later, if needed.  */
      t = plus_constant (Pmode, stack_pointer_rtx, -UNITS_PER_WORD);
      t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Emit prologue code to adjust stack alignment and set up DRAP, in case
     DRAP is needed and stack realignment is really needed after reload.  */
  if (stack_realign_drap)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;

      /* Only need to push parameter pointer reg if it is caller saved.  */
      if (!call_used_regs[REGNO (crtl->drap_reg)])
        {
          /* Push arg pointer reg */
          insn = emit_insn (gen_push (crtl->drap_reg));
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      /* Grab the argument pointer.  */
      t = plus_constant (Pmode, stack_pointer_rtx, m->fs.sp_offset);
      insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
      RTX_FRAME_RELATED_P (insn) = 1;
      m->fs.cfa_reg = crtl->drap_reg;
      m->fs.cfa_offset = 0;

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
                                        stack_pointer_rtx,
                                        GEN_INT (-align_bytes)));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Replicate the return address on the stack so that return
         address can be reached via (argp - 1) slot.  This is needed
         to implement macro RETURN_ADDR_RTX and intrinsic function
         expand_builtin_return_addr etc.  */
      t = plus_constant (Pmode, crtl->drap_reg, -UNITS_PER_WORD);
      t = gen_frame_mem (word_mode, t);
      insn = emit_insn (gen_push (t));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* For the purposes of frame and register save area addressing,
         we've started over with a new frame.  */
      m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
      m->fs.realigned = true;
    }

  int_registers_saved = (frame.nregs == 0);
  sse_registers_saved = (frame.nsseregs == 0);

  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      /* Note: AT&T enter does NOT have reversed args.  Enter is probably
         slower on all targets.  Also sdb doesn't like it.  */
      insn = emit_insn (gen_push (hard_frame_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Push registers now, before setting the frame pointer
         on SEH target.  */
      if (!int_registers_saved
          && TARGET_SEH
          && !frame.save_regs_using_mov)
        {
          ix86_emit_save_regs ();
          int_registers_saved = true;
          gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
        }

      if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
        {
          insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;

          if (m->fs.cfa_reg == stack_pointer_rtx)
            m->fs.cfa_reg = hard_frame_pointer_rtx;
          m->fs.fp_offset = m->fs.sp_offset;
          m->fs.fp_valid = true;
        }
    }

  if (!int_registers_saved)
    {
      /* If saving registers via PUSH, do so now.  */
      if (!frame.save_regs_using_mov)
        {
          ix86_emit_save_regs ();
          int_registers_saved = true;
          gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
        }

      /* When using red zone we may start register saving before allocating
         the stack frame saving one cycle of the prologue.  However, avoid
         doing this if we have to probe the stack; at least on x86_64 the
         stack probe can turn into a call that clobbers a red zone location. */
      else if (ix86_using_red_zone ()
               && (! TARGET_STACK_PROBE
                   || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
        {
          ix86_emit_save_regs_using_mov (frame.reg_save_offset);
          int_registers_saved = true;
        }
    }

  if (stack_realign_fp)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
      gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);

      /* The computation of the size of the re-aligned stack frame means
         that we must allocate the size of the register save area before
         performing the actual alignment.  Otherwise we cannot guarantee
         that there's enough storage above the realignment point.  */
      if (m->fs.sp_offset != frame.sse_reg_save_offset)
        pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                   GEN_INT (m->fs.sp_offset
                                            - frame.sse_reg_save_offset),
                                   -1, false);

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
                                        stack_pointer_rtx,
                                        GEN_INT (-align_bytes)));

      /* For the purposes of register save area addressing, the stack
         pointer is no longer valid.  As for the value of sp_offset,
         see ix86_compute_frame_layout, which we need to match in order
         to pass verification of stack_pointer_offset at the end.  */
      m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
      m->fs.sp_valid = false;
    }

  allocate = frame.stack_pointer_offset - m->fs.sp_offset;

  if (flag_stack_usage_info)
    {
      /* We start to count from ARG_POINTER.  */
      HOST_WIDE_INT stack_size = frame.stack_pointer_offset;

      /* If it was realigned, take into account the fake frame.  */
      if (stack_realign_drap)
        {
          if (ix86_static_chain_on_stack)
            stack_size += UNITS_PER_WORD;

          if (!call_used_regs[REGNO (crtl->drap_reg)])
            stack_size += UNITS_PER_WORD;

          /* This over-estimates by 1 minimal-stack-alignment-unit but
             mitigates that by counting in the new return address slot.  */
          current_function_dynamic_stack_size
            += crtl->stack_alignment_needed / BITS_PER_UNIT;
        }

      current_function_static_stack_size = stack_size;
    }

  /* On SEH target with very large frame size, allocate an area to save
     SSE registers (as the very large allocation won't be described).  */
  if (TARGET_SEH
      && frame.stack_pointer_offset > SEH_MAX_FRAME_SIZE
      && !sse_registers_saved)
    {
      HOST_WIDE_INT sse_size =
        frame.sse_reg_save_offset - frame.reg_save_offset;

      gcc_assert (int_registers_saved);

      /* No need to do stack checking as the area will be immediately
         written.  */
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (-sse_size), -1,
                                 m->fs.cfa_reg == stack_pointer_rtx);
      allocate -= sse_size;
      ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
      sse_registers_saved = true;
    }

  /* The stack has already been decremented by the instruction calling us
     so probe if the size is non-negative to preserve the protection area.  */
  if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    {
      /* We expect the registers to be saved when probes are used.  */
      gcc_assert (int_registers_saved);

      if (STACK_CHECK_MOVING_SP)
        {
          ix86_adjust_stack_and_probe (allocate);
          allocate = 0;
        }
      else
        {
          HOST_WIDE_INT size = allocate;

          if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
            size = 0x80000000 - STACK_CHECK_PROTECT - 1;

          if (TARGET_STACK_PROBE)
            ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
          else
            ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
        }
    }

  if (allocate == 0)
    ;
  else if (!ix86_target_stack_probe ()
           || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (-allocate), -1,
                                 m->fs.cfa_reg == stack_pointer_rtx);
    }
  else
    {
      rtx eax = gen_rtx_REG (Pmode, AX_REG);
      rtx r10 = NULL;
      rtx (*adjust_stack_insn) (rtx, rtx, rtx);
      const bool sp_is_cfa_reg = (m->fs.cfa_reg == stack_pointer_rtx);
      bool eax_live = false;
      bool r10_live = false;

      if (TARGET_64BIT)
        r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
      if (!TARGET_64BIT_MS_ABI)
        eax_live = ix86_eax_live_at_start_p ();

      /* Note that SEH directives need to continue tracking the stack
         pointer even after the frame pointer has been set up.  */
      if (eax_live)
        {
          insn = emit_insn (gen_push (eax));
          allocate -= UNITS_PER_WORD;
          if (sp_is_cfa_reg || TARGET_SEH)
            {
              if (sp_is_cfa_reg)
                m->fs.cfa_offset += UNITS_PER_WORD;
              RTX_FRAME_RELATED_P (insn) = 1;
            }
        }

      if (r10_live)
        {
          r10 = gen_rtx_REG (Pmode, R10_REG);
          insn = emit_insn (gen_push (r10));
          allocate -= UNITS_PER_WORD;
          if (sp_is_cfa_reg || TARGET_SEH)
            {
              if (sp_is_cfa_reg)
                m->fs.cfa_offset += UNITS_PER_WORD;
              RTX_FRAME_RELATED_P (insn) = 1;
            }
        }

      emit_move_insn (eax, GEN_INT (allocate));
      emit_insn (ix86_gen_allocate_stack_worker (eax, eax));

      /* Use the fact that AX still contains ALLOCATE.  */
      adjust_stack_insn = (Pmode == DImode
                           ? gen_pro_epilogue_adjust_stack_di_sub
                           : gen_pro_epilogue_adjust_stack_si_sub);

      insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
                                           stack_pointer_rtx, eax));

      if (sp_is_cfa_reg || TARGET_SEH)
        {
          if (sp_is_cfa_reg)
            m->fs.cfa_offset += allocate;
          RTX_FRAME_RELATED_P (insn) = 1;
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                     plus_constant (Pmode, stack_pointer_rtx,
                                                    -allocate)));
        }
      m->fs.sp_offset += allocate;

      if (r10_live && eax_live)
        {
          t = choose_baseaddr (m->fs.sp_offset - allocate);
          emit_move_insn (gen_rtx_REG (word_mode, R10_REG),
                          gen_frame_mem (word_mode, t));
          t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
          emit_move_insn (gen_rtx_REG (word_mode, AX_REG),
                          gen_frame_mem (word_mode, t));
        }
      else if (eax_live || r10_live)
        {
          t = choose_baseaddr (m->fs.sp_offset - allocate);
          emit_move_insn (gen_rtx_REG (word_mode,
                                       (eax_live ? AX_REG : R10_REG)),
                          gen_frame_mem (word_mode, t));
        }
    }
  gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);

  /* If we haven't already set up the frame pointer, do so now.  */
  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
                            GEN_INT (frame.stack_pointer_offset
                                     - frame.hard_frame_pointer_offset));
      insn = emit_insn (insn);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);

      if (m->fs.cfa_reg == stack_pointer_rtx)
        m->fs.cfa_reg = hard_frame_pointer_rtx;
      m->fs.fp_offset = frame.hard_frame_pointer_offset;
      m->fs.fp_valid = true;
    }

  if (!int_registers_saved)
    ix86_emit_save_regs_using_mov (frame.reg_save_offset);
  if (!sse_registers_saved)
    ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);

  pic_reg_used = false;
  if (pic_offset_table_rtx
      && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
          || crtl->profile))
    {
      unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();

      if (alt_pic_reg_used != INVALID_REGNUM)
        SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);

      pic_reg_used = true;
    }

  if (pic_reg_used)
    {
      if (TARGET_64BIT)
        {
          if (ix86_cmodel == CM_LARGE_PIC)
            {
              rtx label, tmp_reg;

              gcc_assert (Pmode == DImode);
              label = gen_label_rtx ();
              emit_label (label);
              LABEL_PRESERVE_P (label) = 1;
              tmp_reg = gen_rtx_REG (Pmode, R11_REG);
              gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
              insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx,
                                                   label));
              insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
              insn = emit_insn (ix86_gen_add3 (pic_offset_table_rtx,
                                               pic_offset_table_rtx, tmp_reg));
            }
          else
            insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
        }
      else
        {
          insn = emit_insn (gen_set_got (pic_offset_table_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;
          add_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL_RTX);
        }
    }

  /* In the pic_reg_used case, make sure that the got load isn't deleted
     when mcount needs it.  Blockage to avoid call movement across mcount
     call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
     note.  */
  if (crtl->profile && !flag_fentry && pic_reg_used)
    emit_insn (gen_prologue_use (pic_offset_table_rtx));

  if (crtl->drap_reg && !crtl->stack_realign_needed)
    {
      /* vDRAP is set up, but after reload it turns out stack realignment
         isn't necessary; here we emit the prologue to set up DRAP
         without the stack realignment adjustment.  */
      t = choose_baseaddr (0);
      emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
    }

  /* Prevent instructions from being scheduled into register save push
     sequence when access to the redzone area is done through frame pointer.
     The offset between the frame pointer and the stack pointer is calculated
     relative to the value of the stack pointer at the end of the function
     prologue, and moving instructions that access redzone area via frame
     pointer inside push sequence violates this assumption.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  /* Emit cld instruction if stringops are used in the function.  */
  if (TARGET_CLD && ix86_current_function_needs_cld)
    emit_insn (gen_cld ());

  /* SEH requires that the prologue end within 256 bytes of the start of
     the function.  Prevent instruction schedules that would extend that.
     Further, prevent alloca modifications to the stack pointer from being
     combined with prologue modifications.  */
  if (TARGET_SEH)
    emit_insn (gen_prologue_use (stack_pointer_rtx));
}
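/* For reference, the common 32-bit frame-pointer prologue assembled from
   the pieces above is simply (illustrative):

	pushl	%ebp		# save the caller's frame pointer
	movl	%esp, %ebp	# establish the new frame pointer
	subl	$N, %esp	# N == the 'allocate' amount computed above

   with integer register saves either pushed before the allocation or
   stored with moves afterwards, as chosen by frame.save_regs_using_mov.  */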
/* Emit code to restore REG using a POP insn.  */

static void
ix86_emit_restore_reg_using_pop (rtx reg)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (gen_pop (reg));

  ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
  m->fs.sp_offset -= UNITS_PER_WORD;

  if (m->fs.cfa_reg == crtl->drap_reg
      && REGNO (reg) == REGNO (crtl->drap_reg))
    {
      /* Previously we'd represented the CFA as an expression
         like *(%ebp - 8).  We've just popped that value from
         the stack, which means we need to reset the CFA to
         the drap register.  This will remain until we restore
         the stack pointer.  */
      add_reg_note (insn, REG_CFA_DEF_CFA, reg);
      RTX_FRAME_RELATED_P (insn) = 1;

      /* This means that the DRAP register is valid for addressing too.  */
      m->fs.drap_valid = true;
      return;
    }

  if (m->fs.cfa_reg == stack_pointer_rtx)
    {
      rtx x = plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD);
      x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
      RTX_FRAME_RELATED_P (insn) = 1;

      m->fs.cfa_offset -= UNITS_PER_WORD;
    }

  /* When the frame pointer is the CFA, and we pop it, we are
     swapping back to the stack pointer as the CFA.  This happens
     for stack frames that don't allocate other data, so we assume
     the stack pointer is now pointing at the return address, i.e.
     the function entry state, which makes the offset be 1 word.  */
  if (reg == hard_frame_pointer_rtx)
    {
      m->fs.fp_valid = false;
      if (m->fs.cfa_reg == hard_frame_pointer_rtx)
        {
          m->fs.cfa_reg = stack_pointer_rtx;
          m->fs.cfa_offset -= UNITS_PER_WORD;

          add_reg_note (insn, REG_CFA_DEF_CFA,
                        gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                      GEN_INT (m->fs.cfa_offset)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}
/* Emit code to restore saved registers using POP insns.  */

static void
ix86_emit_restore_regs_using_pop (void)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
      ix86_emit_restore_reg_using_pop (gen_rtx_REG (word_mode, regno));
}
/* Emit code and notes for the LEAVE instruction.  */

static void
ix86_emit_leave (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (ix86_gen_leave ());

  ix86_add_queued_cfa_restore_notes (insn);

  gcc_assert (m->fs.fp_valid);
  m->fs.sp_valid = true;
  m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
  m->fs.fp_valid = false;

  if (m->fs.cfa_reg == hard_frame_pointer_rtx)
    {
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = m->fs.sp_offset;

      add_reg_note (insn, REG_CFA_DEF_CFA,
                    plus_constant (Pmode, stack_pointer_rtx,
                                   m->fs.sp_offset));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
                             m->fs.fp_offset);
}
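/* The bookkeeping above mirrors the hardware semantics of LEAVE, which
   behaves like "movl %ebp, %esp; popl %ebp": afterwards the stack pointer
   sits one word above the slot the frame pointer addressed, hence
   sp_offset = fp_offset - UNITS_PER_WORD and fp_valid becomes false.  */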
/* Emit code to restore saved registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
                                  bool maybe_eh_return)
{
  struct machine_function *m = cfun->machine;
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
        rtx reg = gen_rtx_REG (word_mode, regno);
        rtx insn, mem;

        mem = choose_baseaddr (cfa_offset);
        mem = gen_frame_mem (word_mode, mem);
        insn = emit_move_insn (reg, mem);

        if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
          {
            /* Previously we'd represented the CFA as an expression
               like *(%ebp - 8).  We've just popped that value from
               the stack, which means we need to reset the CFA to
               the drap register.  This will remain until we restore
               the stack pointer.  */
            add_reg_note (insn, REG_CFA_DEF_CFA, reg);
            RTX_FRAME_RELATED_P (insn) = 1;

            /* This means that the DRAP register is valid for addressing.  */
            m->fs.drap_valid = true;
          }
        else
          ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

        cfa_offset -= UNITS_PER_WORD;
      }
}
/* Emit code to restore saved SSE registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
                                      bool maybe_eh_return)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
        rtx reg = gen_rtx_REG (V4SFmode, regno);
        rtx mem;

        mem = choose_baseaddr (cfa_offset);
        mem = gen_rtx_MEM (V4SFmode, mem);
        set_mem_align (mem, 128);
        emit_move_insn (reg, mem);

        ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

        cfa_offset -= 16;
      }
}
/* Restore function stack, frame, and registers.  */

void
ix86_expand_epilogue (int style)
{
  struct machine_function *m = cfun->machine;
  struct machine_frame_state frame_state_save = m->fs;
  struct ix86_frame frame;
  bool restore_regs_via_mov;
  bool using_drap;

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);

  m->fs.sp_valid = (!frame_pointer_needed
                    || (crtl->sp_is_unchanging
                        && !stack_realign_fp));
  gcc_assert (!m->fs.sp_valid
              || m->fs.sp_offset == frame.stack_pointer_offset);

  /* The FP must be valid if the frame pointer is present.  */
  gcc_assert (frame_pointer_needed == m->fs.fp_valid);
  gcc_assert (!m->fs.fp_valid
              || m->fs.fp_offset == frame.hard_frame_pointer_offset);

  /* We must have *some* valid pointer to the stack frame.  */
  gcc_assert (m->fs.sp_valid || m->fs.fp_valid);

  /* The DRAP is never valid at this point.  */
  gcc_assert (!m->fs.drap_valid);

  /* See the comment about red zone and frame
     pointer usage in ix86_expand_prologue.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  using_drap = crtl->drap_reg && crtl->stack_realign_needed;
  gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);

  /* Determine the CFA offset of the end of the red-zone.  */
  m->fs.red_zone_offset = 0;
  if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
    {
      /* The red-zone begins below the return address.  */
      m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;

      /* When the register save area is in the aligned portion of
         the stack, determine the maximum runtime displacement that
         matches up with the aligned frame.  */
      if (stack_realign_drap)
        m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
                                  + UNITS_PER_WORD);
    }

  /* Special care must be taken for the normal return case of a function
     using eh_return: the eax and edx registers are marked as saved, but
     not restored along this path.  Adjust the save location to match.  */
  if (crtl->calls_eh_return && style != 2)
    frame.reg_save_offset -= 2 * UNITS_PER_WORD;

  /* EH_RETURN requires the use of moves to function properly.  */
  if (crtl->calls_eh_return)
    restore_regs_via_mov = true;
  /* SEH requires the use of pops to identify the epilogue.  */
  else if (TARGET_SEH)
    restore_regs_via_mov = false;
  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since it's
     less work than reloading sp and popping the register.  */
  else if (!m->fs.sp_valid && frame.nregs <= 1)
    restore_regs_via_mov = true;
  else if (TARGET_EPILOGUE_USING_MOVE
           && cfun->machine->use_fast_prologue_epilogue
           && (frame.nregs > 1
               || m->fs.sp_offset != frame.reg_save_offset))
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
           && !frame.nregs
           && m->fs.sp_offset != frame.reg_save_offset)
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
           && TARGET_USE_LEAVE
           && cfun->machine->use_fast_prologue_epilogue
           && frame.nregs == 1)
    restore_regs_via_mov = true;
  else
    restore_regs_via_mov = false;

  if (restore_regs_via_mov || frame.nsseregs)
    {
      /* Ensure that the entire register save area is addressable via
         the stack pointer, if we will restore via sp.  */
      if (TARGET_64BIT
          && m->fs.sp_offset > 0x7fffffff
          && !(m->fs.fp_valid || m->fs.drap_valid)
          && (frame.nsseregs + frame.nregs) != 0)
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                     GEN_INT (m->fs.sp_offset
                                              - frame.sse_reg_save_offset),
                                     style,
                                     m->fs.cfa_reg == stack_pointer_rtx);
        }
    }

  /* If there are any SSE registers to restore, then we have to do it
     via moves, since there's obviously no pop for SSE regs.  */
  if (frame.nsseregs)
    ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
                                          style == 2);

  if (restore_regs_via_mov)
    {
      rtx t;

      if (frame.nregs)
        ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);

      /* eh_return epilogues need %ecx added to the stack pointer.  */
      if (style == 2)
        {
          rtx insn, sa = EH_RETURN_STACKADJ_RTX;

          /* Stack align doesn't work with eh_return.  */
          gcc_assert (!stack_realign_drap);
          /* Neither do regparm nested functions.  */
          gcc_assert (!ix86_static_chain_on_stack);

          if (frame_pointer_needed)
            {
              t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
              t = plus_constant (Pmode, t, m->fs.fp_offset - UNITS_PER_WORD);
              emit_insn (gen_rtx_SET (VOIDmode, sa, t));

              t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
              insn = emit_move_insn (hard_frame_pointer_rtx, t);

              /* Note that we use SA as a temporary CFA, as the return
                 address is at the proper place relative to it.  We
                 pretend this happens at the FP restore insn because
                 prior to this insn the FP would be stored at the wrong
                 offset relative to SA, and after this insn we have no
                 other reasonable register to use for the CFA.  We don't
                 bother resetting the CFA to the SP for the duration of
                 the return insn.  */
              add_reg_note (insn, REG_CFA_DEF_CFA,
                            plus_constant (Pmode, sa, UNITS_PER_WORD));
              ix86_add_queued_cfa_restore_notes (insn);
              add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
              RTX_FRAME_RELATED_P (insn) = 1;

              m->fs.cfa_reg = sa;
              m->fs.cfa_offset = UNITS_PER_WORD;
              m->fs.fp_valid = false;

              pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
                                         const0_rtx, style, false);
            }
          else
            {
              t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
              t = plus_constant (Pmode, t, m->fs.sp_offset - UNITS_PER_WORD);
              insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
              ix86_add_queued_cfa_restore_notes (insn);

              gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
              if (m->fs.cfa_offset != UNITS_PER_WORD)
                {
                  m->fs.cfa_offset = UNITS_PER_WORD;
                  add_reg_note (insn, REG_CFA_DEF_CFA,
                                plus_constant (Pmode, stack_pointer_rtx,
                                               UNITS_PER_WORD));
                  RTX_FRAME_RELATED_P (insn) = 1;
                }
            }
          m->fs.sp_offset = UNITS_PER_WORD;
          m->fs.sp_valid = true;
        }
    }
  else
    {
      /* SEH requires that the function end with (1) a stack adjustment
         if necessary, (2) a sequence of pops, and (3) a return or
         jump instruction.  Prevent insns from the function body from
         being scheduled into this sequence.  */
      if (TARGET_SEH)
        {
          /* Prevent a catch region from being adjacent to the standard
             epilogue sequence.  Unfortunately, neither crtl->uses_eh_lsda
             nor several other flags that would be interesting to test
             are set up yet.  */
          if (flag_non_call_exceptions)
            emit_insn (gen_nops (const1_rtx));
          else
            emit_insn (gen_blockage ());
        }

      /* First step is to deallocate the stack frame so that we can
         pop the registers.  Also do it on SEH target for very large
         frame as the emitted instructions aren't allowed by the ABI
         in epilogues.  */
      if (!m->fs.sp_valid
          || (TARGET_SEH
              && (m->fs.sp_offset - frame.reg_save_offset
                  >= SEH_MAX_FRAME_SIZE)))
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
                                     GEN_INT (m->fs.fp_offset
                                              - frame.reg_save_offset),
                                     style, false);
        }
      else if (m->fs.sp_offset != frame.reg_save_offset)
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                     GEN_INT (m->fs.sp_offset
                                              - frame.reg_save_offset),
                                     style,
                                     m->fs.cfa_reg == stack_pointer_rtx);
        }

      ix86_emit_restore_regs_using_pop ();
    }

  /* If we used a frame pointer and haven't already got rid of it,
     then do so now.  */
  if (m->fs.fp_valid)
    {
      /* If the stack pointer is valid and pointing at the frame
         pointer store address, then we only need a pop.  */
      if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
        ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
      /* Leave results in shorter dependency chains on CPUs that are
         able to grok it fast.  */
      else if (TARGET_USE_LEAVE
               || optimize_function_for_size_p (cfun)
               || !cfun->machine->use_fast_prologue_epilogue)
        ix86_emit_leave ();
      else
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx,
                                     hard_frame_pointer_rtx,
                                     const0_rtx, style, !using_drap);
          ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
        }
    }

  if (using_drap)
    {
      int param_ptr_offset = UNITS_PER_WORD;
      rtx insn;

      gcc_assert (stack_realign_drap);

      if (ix86_static_chain_on_stack)
        param_ptr_offset += UNITS_PER_WORD;
      if (!call_used_regs[REGNO (crtl->drap_reg)])
        param_ptr_offset += UNITS_PER_WORD;

      insn = emit_insn (gen_rtx_SET
                        (VOIDmode, stack_pointer_rtx,
                         gen_rtx_PLUS (Pmode,
                                       crtl->drap_reg,
                                       GEN_INT (-param_ptr_offset))));
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = param_ptr_offset;
      m->fs.sp_offset = param_ptr_offset;
      m->fs.realigned = false;

      add_reg_note (insn, REG_CFA_DEF_CFA,
                    gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                  GEN_INT (param_ptr_offset)));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!call_used_regs[REGNO (crtl->drap_reg)])
        ix86_emit_restore_reg_using_pop (crtl->drap_reg);
    }

  /* At this point the stack pointer must be valid, and we must have
     restored all of the registers.  We may not have deallocated the
     entire stack frame.  We've delayed this until now because it may
     be possible to merge the local stack deallocation with the
     deallocation forced by ix86_static_chain_on_stack.  */
  gcc_assert (m->fs.sp_valid);
  gcc_assert (!m->fs.fp_valid);
  gcc_assert (!m->fs.realigned);
  if (m->fs.sp_offset != UNITS_PER_WORD)
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
                                 style, true);
    }
  else
    ix86_add_queued_cfa_restore_notes (get_last_insn ());

  /* Sibcall epilogues don't want a return instruction.  */
  if (style == 0)
    {
      m->fs = frame_state_save;
      return;
    }

  if (crtl->args.pops_args && crtl->args.size)
    {
      rtx popc = GEN_INT (crtl->args.pops_args);

      /* i386 can only pop 64K bytes.  If asked to pop more, pop return
         address, do explicit add, and jump indirectly to the caller.  */

      if (crtl->args.pops_args >= 65536)
        {
          rtx ecx = gen_rtx_REG (SImode, CX_REG);
          rtx insn;

          /* There is no "pascal" calling convention in any 64bit ABI.  */
          gcc_assert (!TARGET_64BIT);

          insn = emit_insn (gen_pop (ecx));
          m->fs.cfa_offset -= UNITS_PER_WORD;
          m->fs.sp_offset -= UNITS_PER_WORD;

          add_reg_note (insn, REG_CFA_ADJUST_CFA,
                        copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
          add_reg_note (insn, REG_CFA_REGISTER,
                        gen_rtx_SET (VOIDmode, ecx, pc_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;

          pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                     popc, -1, true);
          emit_jump_insn (gen_simple_return_indirect_internal (ecx));
        }
      else
        emit_jump_insn (gen_simple_return_pop_internal (popc));
    }
  else
    emit_jump_insn (gen_simple_return_internal ());

  /* Restore the state back to the state from the prologue,
     so that it's correct for the next epilogue.  */
  m->fs = frame_state_save;
}
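/* For reference, the usual 32-bit shapes produced by the code above are
   (illustrative):

	leave			# or: movl %ebp, %esp; popl %ebp
	ret			# gen_simple_return_internal

   or, for a callee that pops its own arguments (stdcall/fastcall style),

	ret	$N		# gen_simple_return_pop_internal, N < 64K

   with the >= 64K case falling back to popping the return address into
   %ecx, adjusting the stack explicitly and jumping through %ecx.  */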
/* Reset from the function's potential modifications.  */

static void
ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  if (pic_offset_table_rtx)
    SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
#if TARGET_MACHO
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    rtx deleted_debug_label = NULL_RTX;
    while (insn
           && NOTE_P (insn)
           && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
        /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
           notes; instead set their CODE_LABEL_NUMBER to -1,
           otherwise there would be code generation differences
           between -g and -g0.  */
        if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
          deleted_debug_label = insn;
        insn = PREV_INSN (insn);
      }
    if (insn
        && (LABEL_P (insn)
            || (NOTE_P (insn)
                && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
        if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
          CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
}
/* Return a scratch register to use in the split stack prologue.  The
   split stack prologue is used for -fsplit-stack.  It consists of the
   first instructions in the function, even before the regular prologue.
   The scratch register can be any caller-saved register which is not
   used for parameters or for the static chain.  */

static unsigned int
split_stack_prologue_scratch_regno (void)
{
  if (TARGET_64BIT)
    return R11_REG;
  else
    {
      bool is_fastcall, is_thiscall;
      int regparm;

      is_fastcall = (lookup_attribute ("fastcall",
                                       TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
                     != NULL);
      is_thiscall = (lookup_attribute ("thiscall",
                                       TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
                     != NULL);
      regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);

      if (is_fastcall)
        {
          if (DECL_STATIC_CHAIN (cfun->decl))
            {
              sorry ("-fsplit-stack does not support fastcall with "
                     "nested function");
              return INVALID_REGNUM;
            }
          return AX_REG;
        }
      else if (is_thiscall)
        {
          if (!DECL_STATIC_CHAIN (cfun->decl))
            return DX_REG;
          return AX_REG;
        }
      else if (regparm < 3)
        {
          if (!DECL_STATIC_CHAIN (cfun->decl))
            return CX_REG;
          else
            {
              if (regparm >= 2)
                {
                  sorry ("-fsplit-stack does not support 2 register "
                         "parameters for a nested function");
                  return INVALID_REGNUM;
                }
              return DX_REG;
            }
        }
      else
        {
          /* FIXME: We could make this work by pushing a register
             around the addition and comparison.  */
          sorry ("-fsplit-stack does not support 3 register parameters");
          return INVALID_REGNUM;
        }
    }
}
/* A SYMBOL_REF for the function which allocates new stack space for
   -fsplit-stack.  */

static GTY(()) rtx split_stack_fn;

/* A SYMBOL_REF for the __morestack variant to call when using the large
   code model.  */

static GTY(()) rtx split_stack_fn_large;
/* Handle -fsplit-stack.  These are the first instructions in the
   function, even before the regular prologue.  */

void
ix86_expand_split_stack_prologue (void)
{
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  unsigned HOST_WIDE_INT args_size;
  rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
  rtx scratch_reg = NULL_RTX;
  rtx varargs_label = NULL_RTX;
  rtx fn;

  gcc_assert (flag_split_stack && reload_completed);

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);
  allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;

  /* This is the label we will branch to if we have enough stack
     space.  We expect the basic block reordering pass to reverse this
     branch if optimizing, so that we branch in the unlikely case.  */
  label = gen_label_rtx ();

  /* We need to compare the stack pointer minus the frame size with
     the stack boundary in the TCB.  The stack boundary always gives
     us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
     can compare directly.  Otherwise we need to do an addition.  */

  limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                          UNSPEC_STACK_CHECK);
  limit = gen_rtx_CONST (Pmode, limit);
  limit = gen_rtx_MEM (Pmode, limit);
  if (allocate < SPLIT_STACK_AVAILABLE)
    current = stack_pointer_rtx;
  else
    {
      unsigned int scratch_regno;
      rtx offset;

      /* We need a scratch register to hold the stack pointer minus
         the required frame size.  Since this is the very start of the
         function, the scratch register can be any caller-saved
         register which is not used for parameters.  */
      offset = GEN_INT (- allocate);
      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno == INVALID_REGNUM)
        return;
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
        {
          /* We don't use ix86_gen_add3 in this case because it will
             want to split to lea, but when not optimizing the insn
             will not be split after this point.  */
          emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
                                  gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                                offset)));
        }
      else
        {
          emit_move_insn (scratch_reg, offset);
          emit_insn (ix86_gen_add3 (scratch_reg, scratch_reg,
                                    stack_pointer_rtx));
        }
      current = scratch_reg;
    }

  ix86_expand_branch (GEU, current, limit, label);
  jump_insn = get_last_insn ();
  JUMP_LABEL (jump_insn) = label;

  /* Mark the jump as very likely to be taken.  */
  add_reg_note (jump_insn, REG_BR_PROB,
                GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));

  if (split_stack_fn == NULL_RTX)
    split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
  fn = split_stack_fn;

  /* Get more stack space.  We pass in the desired stack space and the
     size of the arguments to copy to the new stack.  In 32-bit mode
     we push the parameters; __morestack will return on a new stack
     anyhow.  In 64-bit mode we pass the parameters in r10 and
     r11.  */
  allocate_rtx = GEN_INT (allocate);
  args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
  call_fusage = NULL_RTX;
  if (TARGET_64BIT)
    {
      rtx reg10, reg11;

      reg10 = gen_rtx_REG (Pmode, R10_REG);
      reg11 = gen_rtx_REG (Pmode, R11_REG);

      /* If this function uses a static chain, it will be in %r10.
         Preserve it across the call to __morestack.  */
      if (DECL_STATIC_CHAIN (cfun->decl))
        {
          rtx rax;

          rax = gen_rtx_REG (word_mode, AX_REG);
          emit_move_insn (rax, gen_rtx_REG (word_mode, R10_REG));
          use_reg (&call_fusage, rax);
        }

      if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
        {
          HOST_WIDE_INT argval;

          gcc_assert (Pmode == DImode);
          /* When using the large model we need to load the address
             into a register, and we've run out of registers.  So we
             switch to a different calling convention, and we call a
             different function: __morestack_large.  We pass the
             argument size in the upper 32 bits of r10 and pass the
             frame size in the lower 32 bits.  */
          gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
          gcc_assert ((args_size & 0xffffffff) == args_size);

          if (split_stack_fn_large == NULL_RTX)
            split_stack_fn_large =
              gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");

          if (ix86_cmodel == CM_LARGE_PIC)
            {
              rtx label, x;

              label = gen_label_rtx ();
              emit_label (label);
              LABEL_PRESERVE_P (label) = 1;
              emit_insn (gen_set_rip_rex64 (reg10, label));
              emit_insn (gen_set_got_offset_rex64 (reg11, label));
              emit_insn (ix86_gen_add3 (reg10, reg10, reg11));
              x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
                                  UNSPEC_GOT);
              x = gen_rtx_CONST (Pmode, x);
              emit_move_insn (reg11, x);
              x = gen_rtx_PLUS (Pmode, reg10, reg11);
              x = gen_const_mem (Pmode, x);
              emit_move_insn (reg11, x);
            }
          else
            emit_move_insn (reg11, split_stack_fn_large);

          fn = reg11;

          argval = ((args_size << 16) << 16) + allocate;
          emit_move_insn (reg10, GEN_INT (argval));
        }
      else
        {
          emit_move_insn (reg10, allocate_rtx);
          emit_move_insn (reg11, GEN_INT (args_size));
          use_reg (&call_fusage, reg11);
        }

      use_reg (&call_fusage, reg10);
    }
  else
    {
      emit_insn (gen_push (GEN_INT (args_size)));
      emit_insn (gen_push (allocate_rtx));
    }
  call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
                                GEN_INT (UNITS_PER_WORD), constm1_rtx,
                                NULL_RTX, false);
  add_function_usage_to (call_insn, call_fusage);

  /* In order to make call/return prediction work right, we now need
     to execute a return instruction.  See
     libgcc/config/i386/morestack.S for the details on how this works.

     For flow purposes gcc must not see this as a return
     instruction--we need control flow to continue at the subsequent
     label.  Therefore, we use an unspec.  */
  gcc_assert (crtl->args.pops_args < 65536);
  emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));

  /* If we are in 64-bit mode and this function uses a static chain,
     we saved %r10 in %rax before calling __morestack.  */
  if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
    emit_move_insn (gen_rtx_REG (word_mode, R10_REG),
                    gen_rtx_REG (word_mode, AX_REG));

  /* If this function calls va_start, we need to store a pointer to
     the arguments on the old stack, because they may not have been
     all copied to the new stack.  At this point the old stack can be
     found at the frame pointer value used by __morestack, because
     __morestack has set that up before calling back to us.  Here we
     store that pointer in a scratch register, and in
     ix86_expand_prologue we store the scratch register in a stack
     slot.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      unsigned int scratch_regno;
      rtx frame_reg;
      int words;

      scratch_regno = split_stack_prologue_scratch_regno ();
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      frame_reg = gen_rtx_REG (Pmode, BP_REG);

      /* 64-bit:
	 fp -> old fp value
	       return address within this function
	       return address of caller of this function
	       stack arguments
	 So we add three words to get to the stack arguments.

	 32-bit:
	 fp -> old fp value
	       return address within this function
	       first argument to __morestack
	       second argument to __morestack
	       return address of caller of this function
	       stack arguments
	 So we add five words to get to the stack arguments.  */
      words = TARGET_64BIT ? 3 : 5;
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
                              gen_rtx_PLUS (Pmode, frame_reg,
                                            GEN_INT (words * UNITS_PER_WORD))));

      varargs_label = gen_label_rtx ();
      emit_jump_insn (gen_jump (varargs_label));
      JUMP_LABEL (get_last_insn ()) = varargs_label;
    }

  emit_label (label);
  LABEL_NUSES (label) = 1;

  /* If this function calls va_start, we now have to set the scratch
     register for the case where we do not call __morestack.  In this
     case we need to set it based on the stack pointer.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                            GEN_INT (UNITS_PER_WORD))));

      emit_label (varargs_label);
      LABEL_NUSES (varargs_label) = 1;
    }
}
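/* The comparison emitted above checks the stack boundary kept in the TCB;
   on x86_64 the generated entry sequence looks roughly like this (purely
   illustrative -- the TCB slot offset is libc- and target-specific):

	cmpq	%fs:0x70, %rsp	# enough stack for this frame?
	jae	.Lenough
	movq	$FRAME_SIZE, %r10
	movq	$ARGS_SIZE, %r11
	callq	__morestack
	retq			# consumed by __morestack's return path
       .Lenough:
*/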
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a scratch register.  */

static void
ix86_live_on_entry (bitmap regs)
{
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      gcc_assert (flag_split_stack);
      bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
    }
}
/* Determine if OP is a suitable SUBREG RTX for an address.  */

static bool
ix86_address_subreg_operand (rtx op)
{
  enum machine_mode mode;

  if (!REG_P (op))
    return false;

  mode = GET_MODE (op);

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  /* Don't allow SUBREGs that span more than a word.  It can lead to spill
     failures when the register is one word out of a two word structure.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return false;

  /* Allow only SUBREGs of non-eliminable hard registers.  */
  return register_no_elim_operand (op, mode);
}
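/* Example (illustrative): on x86_64, (subreg:SI (reg:DI ...) 0) of a hard
   register passes this test and is later emitted with an addr32 prefix,
   while a subreg of a multi-word TImode value or of an eliminable register
   such as the argument pointer is rejected.  */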
/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return 0 if the structure of the address is
   grossly off.  Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing length of lea instruction.  */

int
ix86_decompose_address (rtx addr, struct ix86_address *out)
{
  rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
  rtx base_reg, index_reg;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;
  rtx tmp;
  int retval = 1;
  enum ix86_address_seg seg = SEG_DEFAULT;

  /* Allow zero-extended SImode addresses,
     they will be emitted with addr32 prefix.  */
  if (TARGET_64BIT && GET_MODE (addr) == DImode)
    {
      if (GET_CODE (addr) == ZERO_EXTEND
	  && GET_MODE (XEXP (addr, 0)) == SImode)
	{
	  addr = XEXP (addr, 0);
	  if (CONST_INT_P (addr))
	    return 0;
	}
      else if (GET_CODE (addr) == AND
	       && const_32bit_mask (XEXP (addr, 1), DImode))
	{
	  addr = simplify_gen_subreg (SImode, XEXP (addr, 0), DImode, 0);
	  if (addr == NULL_RTX)
	    return 0;

	  if (CONST_INT_P (addr))
	    return 0;
	}
    }

  /* Allow SImode subregs of DImode addresses,
     they will be emitted with addr32 prefix.  */
  if (TARGET_64BIT && GET_MODE (addr) == SImode)
    {
      if (GET_CODE (addr) == SUBREG
	  && GET_MODE (SUBREG_REG (addr)) == DImode)
	{
	  addr = SUBREG_REG (addr);
	  if (CONST_INT_P (addr))
	    return 0;
	}
    }

  if (REG_P (addr))
    base = addr;
  else if (GET_CODE (addr) == SUBREG)
    {
      if (ix86_address_subreg_operand (SUBREG_REG (addr)))
	base = addr;
      else
	return 0;
    }
  else if (GET_CODE (addr) == PLUS)
    {
      rtx addends[4], op;
      int n = 0, i;

      op = addr;
      do
	{
	  if (n >= 4)
	    return 0;
	  addends[n++] = XEXP (op, 1);
	  op = XEXP (op, 0);
	}
      while (GET_CODE (op) == PLUS);
      if (n >= 4)
	return 0;
      addends[n] = op;

      for (i = n; i >= 0; --i)
	{
	  op = addends[i];
	  switch (GET_CODE (op))
	    {
	    case MULT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      break;

	    case ASHIFT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      tmp = XEXP (op, 1);
	      if (!CONST_INT_P (tmp))
		return 0;
	      scale = INTVAL (tmp);
	      if ((unsigned HOST_WIDE_INT) scale > 3)
		return 0;
	      scale = 1 << scale;
	      break;

	    case ZERO_EXTEND:
	      op = XEXP (op, 0);
	      if (GET_CODE (op) != UNSPEC)
		return 0;
	      /* FALLTHRU */

	    case UNSPEC:
	      if (XINT (op, 1) == UNSPEC_TP
		  && TARGET_TLS_DIRECT_SEG_REFS
		  && seg == SEG_DEFAULT)
		seg = TARGET_64BIT ? SEG_FS : SEG_GS;
	      else
		return 0;
	      break;

	    case SUBREG:
	      if (!ix86_address_subreg_operand (SUBREG_REG (op)))
		return 0;
	      /* FALLTHRU */

	    case REG:
	      if (!base)
		base = op;
	      else if (!index)
		index = op;
	      else
		return 0;
	      break;

	    case CONST:
	    case CONST_INT:
	    case SYMBOL_REF:
	    case LABEL_REF:
	      if (disp)
		return 0;
	      disp = op;
	      break;

	    default:
	      return 0;
	    }
	}
    }
  else if (GET_CODE (addr) == MULT)
    {
      index = XEXP (addr, 0);		/* index*scale */
      scale_rtx = XEXP (addr, 1);
    }
  else if (GET_CODE (addr) == ASHIFT)
    {
      /* We're called for lea too, which implements ashift on occasion.  */
      index = XEXP (addr, 0);
      tmp = XEXP (addr, 1);
      if (!CONST_INT_P (tmp))
	return 0;
      scale = INTVAL (tmp);
      if ((unsigned HOST_WIDE_INT) scale > 3)
	return 0;
      scale = 1 << scale;
      retval = -1;
    }
  else if (CONST_INT_P (addr))
    {
      if (!x86_64_immediate_operand (addr, VOIDmode))
	return 0;

      /* Constant addresses are sign extended to 64bit, we have to
	 prevent addresses from 0x80000000 to 0xffffffff in x32 mode.  */
      if (TARGET_X32
	  && val_signbit_known_set_p (SImode, INTVAL (addr)))
	return 0;

      disp = addr;
    }
  else
    disp = addr;			/* displacement */

  if (index)
    {
      if (REG_P (index))
	;
      else if (GET_CODE (index) == SUBREG
	       && ix86_address_subreg_operand (SUBREG_REG (index)))
	;
      else
	return 0;
    }

  /* Address override works only on the (%reg) part of %fs:(%reg).  */
  if (seg != SEG_DEFAULT
      && ((base && GET_MODE (base) != word_mode)
	  || (index && GET_MODE (index) != word_mode)))
    return 0;

  /* Extract the integral value of scale.  */
  if (scale_rtx)
    {
      if (!CONST_INT_P (scale_rtx))
	return 0;
      scale = INTVAL (scale_rtx);
    }

  base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
  index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;

  /* Avoid useless 0 displacement.  */
  if (disp == const0_rtx && (base || index))
    disp = NULL_RTX;

  /* Allow arg pointer and stack pointer as index if there is no scaling.  */
  if (base_reg && index_reg && scale == 1
      && (index_reg == arg_pointer_rtx
	  || index_reg == frame_pointer_rtx
	  || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
    {
      tmp = base, base = index, index = tmp;
      tmp = base_reg, base_reg = index_reg, index_reg = tmp;
    }

  /* Special case: %ebp cannot be encoded as a base without a displacement.
     Similarly %r13.  */
  if (!disp
      && base_reg
      && (base_reg == hard_frame_pointer_rtx
	  || base_reg == frame_pointer_rtx
	  || base_reg == arg_pointer_rtx
	  || (REG_P (base_reg)
	      && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
		  || REGNO (base_reg) == R13_REG))))
    disp = const0_rtx;

  /* Special case: on K6, [%esi] makes the instruction vector decoded.
     Avoid this by transforming to [%esi+0].
     Reload calls address legitimization without cfun defined, so we need
     to test cfun for being non-NULL.  */
  if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
      && base_reg && !index_reg && !disp
      && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
    disp = const0_rtx;

  /* Special case: encode reg+reg instead of reg*2.  */
  if (!base && index && scale == 2)
    base = index, base_reg = index_reg, scale = 1;

  /* Special case: scaling cannot be encoded without base or displacement.  */
  if (!base && !disp && index && scale != 1)
    disp = const0_rtx;

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;
  out->seg = seg;

  return retval;
}
/* Return cost of the memory address x.
   For i386, it is better to use a complex address than let gcc copy
   the address into a reg and make a new pseudo.  But not if the address
   requires two regs - that would mean more pseudos with longer
   lifetimes.  */

static int
ix86_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  struct ix86_address parts;
  int cost = 1;
  int ok = ix86_decompose_address (x, &parts);

  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Attempt to minimize number of registers in the address.  */
  if ((parts.base
       && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
      || (parts.index
	  && (!REG_P (parts.index)
	      || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
    cost++;

  if (parts.base
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
      && parts.index
      && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
      && parts.base != parts.index)
    cost++;

  /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoded.  Increase cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
     to split such addresses or even refuse such addresses at all.

     Following addressing modes are affected:
      [base+scale*index]
      [scale*index+disp]
      [base+index]

     The first and last case may be avoidable by explicitly coding the zero in
     memory address, but I don't have AMD-K6 machine handy to check this
     theory.  */

  if (TARGET_K6
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
	  || (parts.disp && !parts.base && parts.index && parts.scale != 1)
	  || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
    cost += 10;

  return cost;
}
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   effect.  */

static bool
darwin_local_data_pic (rtx disp)
{
  return (GET_CODE (disp) == UNSPEC
	  && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
}
/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

static bool
ix86_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (!CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      if (TARGET_MACHO && darwin_local_data_pic (x))
	return true;

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	switch (XINT (x, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	  case UNSPEC_NTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_DTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
	  default:
	    return false;
	  }

      /* We must have drilled down to a symbol.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;
      if (GET_CODE (x) != SYMBOL_REF)
	return false;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return false;

      /* DLLIMPORT symbols are never valid.  */
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
	  && SYMBOL_REF_DLLIMPORT_P (x))
	return false;

#if TARGET_MACHO
      /* mdynamic-no-pic */
      if (MACHO_DYNAMIC_NO_PIC_P)
	return machopic_symbol_defined_p (x);
#endif
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == TImode
	  && x != CONST0_RTX (TImode)
	  && !TARGET_64BIT)
	return false;
      break;

    case CONST_VECTOR:
      if (!standard_sse_constant_p (x))
	return false;

    default:
      break;
    }

  /* Otherwise we handle everything else in the move patterns.  */
  return true;
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible for the address of thread-local symbols, which
   is checked above.  */

static bool
ix86_cannot_force_const_mem (enum machine_mode mode, rtx x)
{
  /* We can always put integral constants and vectors in memory.  */
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return false;

    default:
      break;
    }
  return !ix86_legitimate_constant_p (mode, x);
}
/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  rtx inner;

  switch (GET_CODE (x))
    {
    case CONST:
      inner = XEXP (x, 0);
      if (GET_CODE (inner) == PLUS
	  && CONST_INT_P (XEXP (inner, 1)))
	inner = XEXP (inner, 0);

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (inner) == UNSPEC)
	switch (XINT (inner, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	    x = XVECEXP (inner, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_MACHOPIC_OFFSET:
	    return legitimate_pic_address_disp_p (x);
	  default:
	    return false;
	  }
      /* FALLTHRU */

    case SYMBOL_REF:
    case LABEL_REF:
      return legitimate_pic_address_disp_p (x);

    default:
      return true;
    }
}
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

bool
legitimate_pic_address_disp_p (rtx disp)
{
  bool saw_plus;

  /* In 64bit mode we can allow direct addresses of symbols and labels
     when they are not dynamic symbols.  */
  if (TARGET_64BIT)
    {
      rtx op0 = disp, op1;

      switch (GET_CODE (disp))
	{
	case LABEL_REF:
	  return true;

	case CONST:
	  if (GET_CODE (XEXP (disp, 0)) != PLUS)
	    break;
	  op0 = XEXP (XEXP (disp, 0), 0);
	  op1 = XEXP (XEXP (disp, 0), 1);
	  if (!CONST_INT_P (op1)
	      || INTVAL (op1) >= 16*1024*1024
	      || INTVAL (op1) < -16*1024*1024)
	    break;
	  if (GET_CODE (op0) == LABEL_REF)
	    return true;
	  if (GET_CODE (op0) == CONST
	      && GET_CODE (XEXP (op0, 0)) == UNSPEC
	      && XINT (XEXP (op0, 0), 1) == UNSPEC_PCREL)
	    return true;
	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_PCREL)
	    return true;
	  if (GET_CODE (op0) != SYMBOL_REF)
	    break;
	  /* FALLTHRU */

	case SYMBOL_REF:
	  /* TLS references should always be enclosed in UNSPEC.  */
	  if (SYMBOL_REF_TLS_MODEL (op0))
	    return false;
	  if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
	      && ix86_cmodel != CM_LARGE_PIC)
	    return true;
	  break;

	default:
	  break;
	}
    }
  if (GET_CODE (disp) != CONST)
    return false;
  disp = XEXP (disp, 0);

  if (TARGET_64BIT)
    {
      /* Allowing PLUS expressions here is unsafe; it would let
	 displacements reach beyond the allowed distance into GOT tables.
	 We should not need them anyway.  */
      if (GET_CODE (disp) != UNSPEC
	  || (XINT (disp, 1) != UNSPEC_GOTPCREL
	      && XINT (disp, 1) != UNSPEC_GOTOFF
	      && XINT (disp, 1) != UNSPEC_PCREL
	      && XINT (disp, 1) != UNSPEC_PLTOFF))
	return false;

      if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
	  && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
	return false;
      return true;
    }

  saw_plus = false;
  if (GET_CODE (disp) == PLUS)
    {
      if (!CONST_INT_P (XEXP (disp, 1)))
	return false;
      disp = XEXP (disp, 0);
      saw_plus = true;
    }

  if (TARGET_MACHO && darwin_local_data_pic (disp))
    return true;

  if (GET_CODE (disp) != UNSPEC)
    return false;

  switch (XINT (disp, 1))
    {
    case UNSPEC_GOT:
      if (saw_plus)
	return false;
      /* We need to check for both symbols and labels because VxWorks loads
	 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
	 details.  */
      return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	      || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
    case UNSPEC_GOTOFF:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
	 While the ABI also specifies a 32bit relocation, we don't produce
	 it in the small PIC model at all.  */
      if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	   || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
	  && !TARGET_64BIT)
	return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
      return false;
    case UNSPEC_GOTTPOFF:
    case UNSPEC_GOTNTPOFF:
    case UNSPEC_INDNTPOFF:
      if (saw_plus)
	return false;
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
    case UNSPEC_NTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
    case UNSPEC_DTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
    }

  return false;
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.  */

bool
ix86_legitimize_reload_address (rtx x,
				enum machine_mode mode ATTRIBUTE_UNUSED,
				int opnum, int type,
				int ind_levels ATTRIBUTE_UNUSED)
{
  /* Reload can generate:

     (plus:DI (plus:DI (unspec:DI [(const_int 0 [0])] UNSPEC_TP)
		       (reg:DI 97))
	      (reg:DI 2 cx))

     This RTX is rejected from ix86_legitimate_address_p due to
     non-strictness of base register 97.  Following this rejection,
     reload pushes all three components into separate registers,
     creating invalid memory address RTX.

     Following code reloads only the invalid part of the
     memory address RTX.  */

  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 1))
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 1)))
    {
      rtx base, index;
      bool something_reloaded = false;

      base = XEXP (XEXP (x, 0), 1);
      if (!REG_OK_FOR_BASE_STRICT_P (base))
	{
	  push_reload (base, NULL_RTX, &XEXP (XEXP (x, 0), 1), NULL,
		       BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  something_reloaded = true;
	}

      index = XEXP (x, 1);
      if (!REG_OK_FOR_INDEX_STRICT_P (index))
	{
	  push_reload (index, NULL_RTX, &XEXP (x, 1), NULL,
		       INDEX_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  something_reloaded = true;
	}

      gcc_assert (something_reloaded);
      return true;
    }

  return false;
}
/* Recognizes RTL expressions that are valid memory addresses for an
   instruction.  The MODE argument is the machine mode for the MEM
   expression that wants to use this address.

   It only recognizes address in canonical form.  LEGITIMIZE_ADDRESS should
   convert common non-canonical forms to canonical form so that they will
   be recognized.  */

static bool
ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
			   rtx addr, bool strict)
{
  struct ix86_address parts;
  rtx base, index, disp;
  HOST_WIDE_INT scale;

  if (ix86_decompose_address (addr, &parts) <= 0)
    /* Decomposition failed.  */
    return false;

  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  /* Validate base register.  */
  if (base)
    {
      rtx reg;

      if (REG_P (base))
	reg = base;
      else if (GET_CODE (base) == SUBREG && REG_P (SUBREG_REG (base)))
	reg = SUBREG_REG (base);
      else
	/* Base is not a register.  */
	return false;

      if (GET_MODE (base) != SImode && GET_MODE (base) != DImode)
	return false;

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
	  || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
	/* Base is not valid.  */
	return false;
    }

  /* Validate index register.  */
  if (index)
    {
      rtx reg;

      if (REG_P (index))
	reg = index;
      else if (GET_CODE (index) == SUBREG && REG_P (SUBREG_REG (index)))
	reg = SUBREG_REG (index);
      else
	/* Index is not a register.  */
	return false;

      if (GET_MODE (index) != SImode && GET_MODE (index) != DImode)
	return false;

      if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
	  || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
	/* Index is not valid.  */
	return false;
    }

  /* Index and base should have the same mode.  */
  if (base && index
      && GET_MODE (base) != GET_MODE (index))
    return false;

  /* Validate scale factor.  */
  if (scale != 1)
    {
      if (!index)
	/* Scale without index.  */
	return false;

      if (scale != 2 && scale != 4 && scale != 8)
	/* Scale is not a valid multiplier.  */
	return false;
    }

  /* Validate displacement.  */
  if (disp)
    {
      if (GET_CODE (disp) == CONST
	  && GET_CODE (XEXP (disp, 0)) == UNSPEC
	  && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
	switch (XINT (XEXP (disp, 0), 1))
	  {
	  /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit
	     when used.  While the ABI also specifies 32bit relocations, we
	     don't produce them at all and use IP relative instead.  */
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	    gcc_assert (flag_pic);
	    if (!TARGET_64BIT)
	      goto is_legitimate_pic;

	    /* 64bit address unspec.  */
	    return false;

	  case UNSPEC_GOTPCREL:
	  case UNSPEC_PCREL:
	    gcc_assert (flag_pic);
	    goto is_legitimate_pic;

	  case UNSPEC_GOTTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_INDNTPOFF:
	  case UNSPEC_NTPOFF:
	  case UNSPEC_DTPOFF:
	    break;

	  case UNSPEC_STACK_CHECK:
	    gcc_assert (flag_split_stack);
	    break;

	  default:
	    /* Invalid address unspec.  */
	    return false;
	  }

      else if (SYMBOLIC_CONST (disp)
	       && (flag_pic
		   || (TARGET_MACHO
#if TARGET_MACHO
		       && MACHOPIC_INDIRECT
		       && !machopic_operand_p (disp)
#endif
	       )))
	{

	is_legitimate_pic:
	  if (TARGET_64BIT && (index || base))
	    {
	      /* foo@dtpoff(%rX) is ok.  */
	      if (GET_CODE (disp) != CONST
		  || GET_CODE (XEXP (disp, 0)) != PLUS
		  || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
		  || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
		  || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
		      && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
		/* Non-constant pic memory reference.  */
		return false;
	    }
	  else if ((!TARGET_MACHO || flag_pic)
		   && ! legitimate_pic_address_disp_p (disp))
	    /* Displacement is an invalid pic construct.  */
	    return false;
#if TARGET_MACHO
	  else if (MACHO_DYNAMIC_NO_PIC_P
		   && !ix86_legitimate_constant_p (Pmode, disp))
	    /* displacement must be referenced via non_lazy_pointer */
	    return false;
#endif

	  /* This code used to verify that a symbolic pic displacement
	     includes the pic_offset_table_rtx register.

	     While this is good idea, unfortunately these constructs may
	     be created by "adds using lea" optimization for incorrect
	     code like:

	     int a;
	     int foo(int i)
	       {
		 return *(&a+i);
	       }

	     This code is nonsensical, but results in addressing
	     GOT table with pic_offset_table_rtx base.  We can't
	     just refuse it easily, since it gets matched by
	     "addsi3" pattern, that later gets split to lea in the
	     case output register differs from input.  While this
	     can be handled by separate addsi pattern for this case
	     that never results in lea, this seems to be easier and
	     correct fix for crash to disable this test.  */
	}
      else if (GET_CODE (disp) != LABEL_REF
	       && !CONST_INT_P (disp)
	       && (GET_CODE (disp) != CONST
		   || !ix86_legitimate_constant_p (Pmode, disp))
	       && (GET_CODE (disp) != SYMBOL_REF
		   || !ix86_legitimate_constant_p (Pmode, disp)))
	/* Displacement is not constant.  */
	return false;
      else if (TARGET_64BIT
	       && !x86_64_immediate_operand (disp, VOIDmode))
	/* Displacement is out of range.  */
	return false;
    }

  /* Everything looks valid.  */
  return true;
}
/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
}
/* Return a unique alias set for the GOT.  */

static alias_set_type
ix86_GOT_alias_set (void)
{
  static alias_set_type set = -1;
  if (set == -1)
    set = new_alias_set ();
  return set;
}
/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is
      returned.

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_FLAG_LOCAL set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
   reg also appears in the address.  */

static rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  rtx addr = orig;
  rtx new_rtx = orig;

#if TARGET_MACHO
  if (TARGET_MACHO && !TARGET_64BIT)
    {
      if (reg == 0)
	reg = gen_reg_rtx (Pmode);
      /* Use the generic Mach-O PIC machinery.  */
      return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
    }
#endif

  if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
    new_rtx = addr;
  else if (TARGET_64BIT
	   && ix86_cmodel != CM_SMALL_PIC
	   && gotoff_operand (addr, Pmode))
    {
      rtx tmpreg;

      /* This symbol may be referenced via a displacement from the PIC
	 base address (@GOTOFF).  */

      if (reload_in_progress)
	df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
      if (GET_CODE (addr) == CONST)
	addr = XEXP (addr, 0);
      if (GET_CODE (addr) == PLUS)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
				    UNSPEC_GOTOFF);
	  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
	}
      else
	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
      new_rtx = gen_rtx_CONST (Pmode, new_rtx);

      if (!reg)
	tmpreg = gen_reg_rtx (Pmode);
      else
	tmpreg = reg;

      emit_move_insn (tmpreg, new_rtx);

      if (reg != 0)
	{
	  new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
					 tmpreg, 1, OPTAB_DIRECT);
	  new_rtx = reg;
	}
      else
	new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
    }
  else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
    {
      /* This symbol may be referenced via a displacement from the PIC
	 base address (@GOTOFF).  */

      if (reload_in_progress)
	df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
      if (GET_CODE (addr) == CONST)
	addr = XEXP (addr, 0);
      if (GET_CODE (addr) == PLUS)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
				    UNSPEC_GOTOFF);
	  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
	}
      else
	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
      new_rtx = gen_rtx_CONST (Pmode, new_rtx);
      new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);

      if (reg != 0)
	{
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
  else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
	   /* We can't use @GOTOFF for text labels on VxWorks;
	      see gotoff_operand.  */
	   || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
    {
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
	{
	  if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
	    return legitimize_dllimport_symbol (addr, true);
	  if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
	      && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
	    {
	      rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
	      return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
	    }
	}

      /* For x64 PE-COFF there is no GOT table.  So we use address
	 directly.  */
      if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
      else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  /* Use directly gen_movsi, otherwise the address is loaded
	     into register for CSE.  We don't want to CSE these addresses,
	     instead we CSE addresses from the GOT table, so skip this.  */
	  emit_insn (gen_movsi (reg, new_rtx));
	  new_rtx = reg;
	}
      else
	{
	  /* This symbol must be referenced via a load from the
	     Global Offset Table (@GOT).  */

	  if (reload_in_progress)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  if (TARGET_64BIT)
	    new_rtx = force_reg (Pmode, new_rtx);
	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
  else
    {
      if (CONST_INT_P (addr)
	  && !x86_64_immediate_operand (addr, VOIDmode))
	{
	  if (reg)
	    {
	      emit_move_insn (reg, addr);
	      new_rtx = reg;
	    }
	  else
	    new_rtx = force_reg (Pmode, addr);
	}
      else if (GET_CODE (addr) == CONST)
	{
	  addr = XEXP (addr, 0);

	  /* We must match stuff we generate before.  Assume the only
	     unspecs that can get here are ours.  Not that we could do
	     anything with them anyway....  */
	  if (GET_CODE (addr) == UNSPEC
	      || (GET_CODE (addr) == PLUS
		  && GET_CODE (XEXP (addr, 0)) == UNSPEC))
	    return orig;
	  gcc_assert (GET_CODE (addr) == PLUS);
	}
      if (GET_CODE (addr) == PLUS)
	{
	  rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);

	  /* Check first to see if this is a constant offset from a @GOTOFF
	     symbol reference.  */
	  if (gotoff_operand (op0, Pmode)
	      && CONST_INT_P (op1))
	    {
	      if (!TARGET_64BIT)
		{
		  if (reload_in_progress)
		    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
		  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
					    UNSPEC_GOTOFF);
		  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
		  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
		  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);

		  if (reg != 0)
		    {
		      emit_move_insn (reg, new_rtx);
		      new_rtx = reg;
		    }
		}
	      else
		{
		  if (INTVAL (op1) < -16*1024*1024
		      || INTVAL (op1) >= 16*1024*1024)
		    {
		      if (!x86_64_immediate_operand (op1, Pmode))
			op1 = force_reg (Pmode, op1);
		      new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
		    }
		}
	    }
	  else
	    {
	      rtx base = legitimize_pic_address (op0, reg);
	      enum machine_mode mode = GET_MODE (base);
	      new_rtx
		= legitimize_pic_address (op1, base == reg ? NULL_RTX : reg);

	      if (CONST_INT_P (new_rtx))
		{
		  if (INTVAL (new_rtx) < -16*1024*1024
		      || INTVAL (new_rtx) >= 16*1024*1024)
		    {
		      if (!x86_64_immediate_operand (new_rtx, mode))
			new_rtx = force_reg (mode, new_rtx);
		      new_rtx
			= gen_rtx_PLUS (mode, force_reg (mode, base), new_rtx);
		    }
		  else
		    new_rtx = plus_constant (mode, base, INTVAL (new_rtx));
		}
	      else
		{
		  if (GET_CODE (new_rtx) == PLUS
		      && CONSTANT_P (XEXP (new_rtx, 1)))
		    {
		      base = gen_rtx_PLUS (mode, base, XEXP (new_rtx, 0));
		      new_rtx = XEXP (new_rtx, 1);
		    }
		  new_rtx = gen_rtx_PLUS (mode, base, new_rtx);
		}
	    }
	}
    }
  return new_rtx;
}
/* Load the thread pointer.  If TO_REG is true, force it into a register.  */

static rtx
get_thread_pointer (enum machine_mode tp_mode, bool to_reg)
{
  rtx tp = gen_rtx_UNSPEC (ptr_mode, gen_rtvec (1, const0_rtx), UNSPEC_TP);

  if (GET_MODE (tp) != tp_mode)
    {
      gcc_assert (GET_MODE (tp) == SImode);
      gcc_assert (tp_mode == DImode);

      tp = gen_rtx_ZERO_EXTEND (tp_mode, tp);
    }

  if (to_reg)
    tp = copy_to_mode_reg (tp_mode, tp);

  return tp;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx ix86_tls_symbol;

static rtx
ix86_tls_get_addr (void)
{
  if (!ix86_tls_symbol)
    {
      const char *sym
	= ((TARGET_ANY_GNU_TLS && !TARGET_64BIT)
	   ? "___tls_get_addr" : "__tls_get_addr");

      ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, sym);
    }

  return ix86_tls_symbol;
}
/* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol.  */

static GTY(()) rtx ix86_tls_module_base_symbol;

static rtx
ix86_tls_module_base (void)
{
  if (!ix86_tls_module_base_symbol)
    {
      ix86_tls_module_base_symbol
	= gen_rtx_SYMBOL_REF (Pmode, "_TLS_MODULE_BASE_");

      SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
	|= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
    }

  return ix86_tls_module_base_symbol;
}
/* A subroutine of ix86_legitimize_address and ix86_expand_move.  FOR_MOV is
   false if we expect this to be used for a memory address and true if
   we expect to load the address into a register.  */

static rtx
legitimize_tls_address (rtx x, enum tls_model model, bool for_mov)
{
  rtx dest, base, off;
  rtx pic = NULL_RTX, tp = NULL_RTX;
  enum machine_mode tp_mode = Pmode;
  int type;

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      dest = gen_reg_rtx (Pmode);

      if (!TARGET_64BIT)
	{
	  if (flag_pic)
	    pic = pic_offset_table_rtx;
	  else
	    {
	      pic = gen_reg_rtx (Pmode);
	      emit_insn (gen_set_got (pic));
	    }
	}

      if (TARGET_GNU2_TLS)
	{
	  if (TARGET_64BIT)
	    emit_insn (gen_tls_dynamic_gnu2_64 (dest, x));
	  else
	    emit_insn (gen_tls_dynamic_gnu2_32 (dest, x, pic));

	  tp = get_thread_pointer (Pmode, true);
	  dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));

	  if (GET_MODE (x) != Pmode)
	    x = gen_rtx_ZERO_EXTEND (Pmode, x);

	  set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
	}
      else
	{
	  rtx caddr = ix86_tls_get_addr ();

	  if (TARGET_64BIT)
	    {
	      rtx rax = gen_rtx_REG (Pmode, AX_REG);
	      rtx insns;

	      start_sequence ();
	      emit_call_insn
		(ix86_gen_tls_global_dynamic_64 (rax, x, caddr));
	      insns = get_insns ();
	      end_sequence ();

	      if (GET_MODE (x) != Pmode)
		x = gen_rtx_ZERO_EXTEND (Pmode, x);

	      RTL_CONST_CALL_P (insns) = 1;
	      emit_libcall_block (insns, dest, rax, x);
	    }
	  else
	    emit_insn (gen_tls_global_dynamic_32 (dest, x, pic, caddr));
	}
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      base = gen_reg_rtx (Pmode);

      if (!TARGET_64BIT)
	{
	  if (flag_pic)
	    pic = pic_offset_table_rtx;
	  else
	    {
	      pic = gen_reg_rtx (Pmode);
	      emit_insn (gen_set_got (pic));
	    }
	}

      if (TARGET_GNU2_TLS)
	{
	  rtx tmp = ix86_tls_module_base ();

	  if (TARGET_64BIT)
	    emit_insn (gen_tls_dynamic_gnu2_64 (base, tmp));
	  else
	    emit_insn (gen_tls_dynamic_gnu2_32 (base, tmp, pic));

	  tp = get_thread_pointer (Pmode, true);
	  set_unique_reg_note (get_last_insn (), REG_EQUAL,
			       gen_rtx_MINUS (Pmode, tmp, tp));
	}
      else
	{
	  rtx caddr = ix86_tls_get_addr ();

	  if (TARGET_64BIT)
	    {
	      rtx rax = gen_rtx_REG (Pmode, AX_REG);
	      rtx insns, eqv;

	      start_sequence ();
	      emit_call_insn
		(ix86_gen_tls_local_dynamic_base_64 (rax, caddr));
	      insns = get_insns ();
	      end_sequence ();

	      /* Attach a unique REG_EQUAL, to allow the RTL optimizers to
		 share the LD_BASE result with other LD model accesses.  */
	      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				    UNSPEC_TLS_LD_BASE);

	      RTL_CONST_CALL_P (insns) = 1;
	      emit_libcall_block (insns, base, rax, eqv);
	    }
	  else
	    emit_insn (gen_tls_local_dynamic_base_32 (base, pic, caddr));
	}

      off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
      off = gen_rtx_CONST (Pmode, off);

      dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));

      if (TARGET_GNU2_TLS)
	{
	  dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));

	  if (GET_MODE (x) != Pmode)
	    x = gen_rtx_ZERO_EXTEND (Pmode, x);

	  set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
	}
      break;

    case TLS_MODEL_INITIAL_EXEC:
      if (TARGET_64BIT)
	{
	  if (TARGET_SUN_TLS && !TARGET_X32)
	    {
	      /* The Sun linker took the AMD64 TLS spec literally
		 and can only handle %rax as destination of the
		 initial executable code sequence.  */

	      dest = gen_reg_rtx (DImode);
	      emit_insn (gen_tls_initial_exec_64_sun (dest, x));
	      return dest;
	    }

	  /* Generate DImode references to avoid %fs:(%reg32)
	     problems and linker IE->LE relaxation bug.  */
	  tp_mode = DImode;
	  pic = NULL;
	  type = UNSPEC_GOTNTPOFF;
	}
      else if (flag_pic)
	{
	  if (reload_in_progress)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
	  pic = pic_offset_table_rtx;
	  type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
	}
      else if (!TARGET_ANY_GNU_TLS)
	{
	  pic = gen_reg_rtx (Pmode);
	  emit_insn (gen_set_got (pic));
	  type = UNSPEC_GOTTPOFF;
	}
      else
	{
	  pic = NULL;
	  type = UNSPEC_INDNTPOFF;
	}

      off = gen_rtx_UNSPEC (tp_mode, gen_rtvec (1, x), type);
      off = gen_rtx_CONST (tp_mode, off);
      if (pic)
	off = gen_rtx_PLUS (tp_mode, pic, off);
      off = gen_const_mem (tp_mode, off);
      set_mem_alias_set (off, ix86_GOT_alias_set ());

      if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
	{
	  base = get_thread_pointer (tp_mode,
				     for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
	  off = force_reg (tp_mode, off);
	  return gen_rtx_PLUS (tp_mode, base, off);
	}
      else
	{
	  base = get_thread_pointer (Pmode, true);
	  dest = gen_reg_rtx (Pmode);
	  emit_insn (ix86_gen_sub3 (dest, base, off));
	}
      break;

    case TLS_MODEL_LOCAL_EXEC:
      off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
			    (TARGET_64BIT || TARGET_ANY_GNU_TLS)
			    ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
      off = gen_rtx_CONST (Pmode, off);

      if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
	{
	  base = get_thread_pointer (Pmode,
				     for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
	  return gen_rtx_PLUS (Pmode, base, off);
	}
      else
	{
	  base = get_thread_pointer (Pmode, true);
	  dest = gen_reg_rtx (Pmode);
	  emit_insn (ix86_gen_sub3 (dest, base, off));
	}
      break;

    default:
      gcc_unreachable ();
    }

  return dest;
}
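/* For reference, the RTL built above typically assembles as follows on
   64-bit GNU/Linux (illustrative; the exact sequence depends on the TLS
   model, -fpic, and TARGET_GNU2_TLS):

     local-exec:	movq %fs:0, %rax
			leaq x@tpoff(%rax), %rax

     initial-exec:	movq x@gottpoff(%rip), %rax
			movq %fs:(%rax), %rax

     global-dynamic:	leaq x@tlsgd(%rip), %rdi
			call __tls_get_addr@PLT  */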
/* Create or return the unique __imp_DECL dllimport symbol corresponding
   to symbol DECL.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
  htab_t dllimport_map;

static tree
get_dllimport_decl (tree decl)
{
  struct tree_map *h, in;
  void **loc;
  const char *name;
  const char *prefix;
  size_t namelen, prefixlen;
  char *imp_name;
  tree to;
  rtx rtl;

  if (!dllimport_map)
    dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);

  in.hash = htab_hash_pointer (decl);
  in.base.from = decl;
  loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
  h = (struct tree_map *) *loc;
  if (h)
    return h->to;

  *loc = h = ggc_alloc_tree_map ();
  h->hash = in.hash;
  h->base.from = decl;
  h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
			   VAR_DECL, NULL, ptr_type_node);
  DECL_ARTIFICIAL (to) = 1;
  DECL_IGNORED_P (to) = 1;
  DECL_EXTERNAL (to) = 1;
  TREE_READONLY (to) = 1;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = targetm.strip_name_encoding (name);
  prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
    ? "*__imp_" : "*__imp__";
  namelen = strlen (name);
  prefixlen = strlen (prefix);
  imp_name = (char *) alloca (namelen + prefixlen + 1);
  memcpy (imp_name, prefix, prefixlen);
  memcpy (imp_name + prefixlen, name, namelen + 1);

  name = ggc_alloc_string (imp_name, namelen + prefixlen);
  rtl = gen_rtx_SYMBOL_REF (Pmode, name);
  SET_SYMBOL_REF_DECL (rtl, to);
  SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;

  rtl = gen_const_mem (Pmode, rtl);
  set_mem_alias_set (rtl, ix86_GOT_alias_set ());

  SET_DECL_RTL (to, rtl);
  SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));

  return to;
}
/* Expand SYMBOL into its corresponding dllimport symbol.  WANT_REG is
   true if we require the result be a register.  */

static rtx
legitimize_dllimport_symbol (rtx symbol, bool want_reg)
{
  tree imp_decl;
  rtx x;

  gcc_assert (SYMBOL_REF_DECL (symbol));
  imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));

  x = DECL_RTL (imp_decl);
  if (want_reg)
    x = force_reg (Pmode, x);
  return x;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the 80386, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in a general reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in a general reg.

   When -fpic is used, special handling is needed for symbolic references.
   See comments by legitimize_pic_address in i386.c for details.  */

static rtx
ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  int changed = 0;
  unsigned log;

  log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
  if (log)
    return legitimize_tls_address (x, (enum tls_model) log, false);
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
    {
      rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
				      (enum tls_model) log, false);
      return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
    }

  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
    {
      if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
	return legitimize_dllimport_symbol (x, true);
      if (GET_CODE (x) == CONST
	  && GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
	  && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
	{
	  rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
	  return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
	}
    }

  if (flag_pic && SYMBOLIC_CONST (x))
    return legitimize_pic_address (x, 0);

#if TARGET_MACHO
  if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
    return machopic_indirect_data_reference (x, 0);
#endif

  /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
  if (GET_CODE (x) == ASHIFT
      && CONST_INT_P (XEXP (x, 1))
      && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
    {
      changed = 1;
      log = INTVAL (XEXP (x, 1));
      x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
			GEN_INT (1 << log));
    }

  if (GET_CODE (x) == PLUS)
    {
      /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */

      if (GET_CODE (XEXP (x, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
	{
	  changed = 1;
	  log = INTVAL (XEXP (XEXP (x, 0), 1));
	  XEXP (x, 0) = gen_rtx_MULT (Pmode,
				      force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
				      GEN_INT (1 << log));
	}

      if (GET_CODE (XEXP (x, 1)) == ASHIFT
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
	{
	  changed = 1;
	  log = INTVAL (XEXP (XEXP (x, 1), 1));
	  XEXP (x, 1) = gen_rtx_MULT (Pmode,
				      force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
				      GEN_INT (1 << log));
	}

      /* Put multiply first if it isn't already.  */
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx tmp = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tmp;
	  changed = 1;
	}

      /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
	 into (plus (plus (mult (reg) (const)) (reg)) (const)).  This can be
	 created by virtual register instantiation, register elimination, and
	 similar optimizations.  */
      if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
	{
	  changed = 1;
	  x = gen_rtx_PLUS (Pmode,
			    gen_rtx_PLUS (Pmode, XEXP (x, 0),
					  XEXP (XEXP (x, 1), 0)),
			    XEXP (XEXP (x, 1), 1));
	}

      /* Canonicalize
	 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
	 into (plus (plus (mult (reg) (const)) (reg)) (const)).  */
      else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
	       && CONSTANT_P (XEXP (x, 1)))
	{
	  rtx constant;
	  rtx other = NULL_RTX;

	  if (CONST_INT_P (XEXP (x, 1)))
	    {
	      constant = XEXP (x, 1);
	      other = XEXP (XEXP (XEXP (x, 0), 1), 1);
	    }
	  else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
	    {
	      constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
	      other = XEXP (x, 1);
	    }
	  else
	    constant = 0;

	  if (constant)
	    {
	      changed = 1;
	      x = gen_rtx_PLUS (Pmode,
				gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (XEXP (x, 0), 1), 0)),
				plus_constant (Pmode, other,
					       INTVAL (constant)));
	    }
	}

      if (changed && ix86_legitimate_address_p (mode, x, false))
	return x;

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  changed = 1;
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}

      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  changed = 1;
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}

      if (changed
	  && REG_P (XEXP (x, 1))
	  && REG_P (XEXP (x, 0)))
	return x;

      if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
	{
	  changed = 1;
	  x = legitimize_pic_address (x, 0);
	}

      if (changed && ix86_legitimate_address_p (mode, x, false))
	return x;

      if (REG_P (XEXP (x, 0)))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val  = force_operand (XEXP (x, 1), temp);
	  if (val != temp)
	    {
	      val = convert_to_mode (Pmode, val, 1);
	      emit_move_insn (temp, val);
	    }

	  XEXP (x, 1) = temp;
	  return x;
	}

      else if (REG_P (XEXP (x, 1)))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val  = force_operand (XEXP (x, 0), temp);
	  if (val != temp)
	    {
	      val = convert_to_mode (Pmode, val, 1);
	      emit_move_insn (temp, val);
	    }

	  XEXP (x, 0) = temp;
	  return x;
	}
    }

  return x;
}
/* Print an integer constant expression in assembler syntax.  Addition
   and subtraction are the only arithmetic that may appear in these
   expressions.  FILE is the stdio stream to write to, X is the rtx, and
   CODE is the operand print code from the output string.  */

static void
output_pic_addr_const (FILE *file, rtx x, int code)
{
  char buf[256];

  switch (GET_CODE (x))
    {
    case PC:
      gcc_assert (flag_pic);
      putc ('.', file);
      break;

    case SYMBOL_REF:
      if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
	output_addr_const (file, x);
      else
	{
	  const char *name = XSTR (x, 0);

	  /* Mark the decl as referenced so that cgraph will
	     output the function.  */
	  if (SYMBOL_REF_DECL (x))
	    mark_decl_referenced (SYMBOL_REF_DECL (x));

#if TARGET_MACHO
	  if (MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
	  && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
	fputs ("@PLT", file);
      break;

    case LABEL_REF:
      x = XEXP (x, 0);
      /* FALLTHRU */
    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
      assemble_name (asm_out_file, buf);
      break;

    case CONST_INT:
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;

    case CONST:
      /* This used to output parentheses around the expression,
	 but that does not work on the 386 (either ATT or BSD assembler).  */
      output_pic_addr_const (file, XEXP (x, 0), code);
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	{
	  /* We can use %d if the number is <32 bits and positive.  */
	  if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
	    fprintf (file, "0x%lx%08lx",
		     (unsigned long) CONST_DOUBLE_HIGH (x),
		     (unsigned long) CONST_DOUBLE_LOW (x));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
	}
      else
	/* We can't handle floating point constants;
	   TARGET_PRINT_OPERAND must handle them.  */
	output_operand_lossage ("floating constant misused");
      break;

    case PLUS:
      /* Some assemblers need integer constants to appear first.  */
      if (CONST_INT_P (XEXP (x, 0)))
	{
	  output_pic_addr_const (file, XEXP (x, 0), code);
	  putc ('+', file);
	  output_pic_addr_const (file, XEXP (x, 1), code);
	}
      else
	{
	  gcc_assert (CONST_INT_P (XEXP (x, 1)));
	  output_pic_addr_const (file, XEXP (x, 1), code);
	  putc ('+', file);
	  output_pic_addr_const (file, XEXP (x, 0), code);
	}
      break;

    case MINUS:
      if (!TARGET_MACHO)
	putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
      output_pic_addr_const (file, XEXP (x, 0), code);
      putc ('-', file);
      output_pic_addr_const (file, XEXP (x, 1), code);
      if (!TARGET_MACHO)
	putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
      break;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_STACK_CHECK)
	{
	  bool f = i386_asm_output_addr_const_extra (file, x);
	  gcc_assert (f);
	  break;
	}

      gcc_assert (XVECLEN (x, 0) == 1);
      output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
      switch (XINT (x, 1))
	{
	case UNSPEC_GOT:
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLTOFF:
	  fputs ("@PLTOFF", file);
	  break;
	case UNSPEC_PCREL:
	  fputs (ASSEMBLER_DIALECT == ASM_ATT ?
		 "(%rip)" : "[rip]", file);
	  break;
	case UNSPEC_GOTPCREL:
	  fputs (ASSEMBLER_DIALECT == ASM_ATT ?
		 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
	  break;
	case UNSPEC_GOTTPOFF:
	  /* FIXME: This might be @TPOFF in Sun ld too.  */
	  fputs ("@gottpoff", file);
	  break;
	case UNSPEC_TPOFF:
	  fputs ("@tpoff", file);
	  break;
	case UNSPEC_NTPOFF:
	  if (TARGET_64BIT)
	    fputs ("@tpoff", file);
	  else
	    fputs ("@ntpoff", file);
	  break;
	case UNSPEC_DTPOFF:
	  fputs ("@dtpoff", file);
	  break;
	case UNSPEC_GOTNTPOFF:
	  if (TARGET_64BIT)
	    fputs (ASSEMBLER_DIALECT == ASM_ATT ?
		   "@gottpoff(%rip)": "@gottpoff[rip]", file);
	  else
	    fputs ("@gotntpoff", file);
	  break;
	case UNSPEC_INDNTPOFF:
	  fputs ("@indntpoff", file);
	  break;
#if TARGET_MACHO
	case UNSPEC_MACHOPIC_OFFSET:
	  putc ('-', file);
	  machopic_output_function_base_name (file);
	  break;
#endif
	default:
	  output_operand_lossage ("invalid UNSPEC as operand");
	  break;
	}
      break;

    default:
      output_operand_lossage ("invalid expression as operand");
    }
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void ATTRIBUTE_UNUSED
i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  fputs (ASM_LONG, file);
  output_addr_const (file, x);
  fputs ("@dtpoff", file);
  switch (size)
    {
    case 4:
      break;
    case 8:
      fputs (", 0", file);
      break;
    default:
      gcc_unreachable ();
   }
}
/* Return true if X is a representation of the PIC register.  This copes
   with calls from ix86_find_base_term, where the register might have
   been replaced by a cselib value.  */

static bool
ix86_pic_register_p (rtx x)
{
  if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
    return (pic_offset_table_rtx
	    && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
  else
    return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
}
/* Helper function for ix86_delegitimize_address.
   Attempt to delegitimize TLS local-exec accesses.  */

static rtx
ix86_delegitimize_tls_address (rtx orig_x)
{
  rtx x = orig_x, unspec;
  struct ix86_address addr;

  if (!TARGET_TLS_DIRECT_SEG_REFS)
    return orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;
  if (ix86_decompose_address (x, &addr) == 0
      || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
      || addr.disp == NULL_RTX
      || GET_CODE (addr.disp) != CONST)
    return orig_x;
  unspec = XEXP (addr.disp, 0);
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
    return orig_x;
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  if (unspec != XEXP (addr.disp, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize PIC+GOTOFF and turn it back
   into a direct symbol reference.

   On Darwin, this is necessary to avoid a crash, because Darwin
   has a different PIC label for each routine but the DWARF debugging
   information is not associated with any particular routine, so it's
   necessary to remove references to the PIC label from RTL stored by
   the DWARF output code.  */

static rtx
ix86_delegitimize_address (rtx x)
{
  rtx orig_x = delegitimize_mem_from_attrs (x);
  /* addend is NULL or some rtx if x is something+GOTOFF where
     something doesn't include the PIC register.  */
  rtx addend = NULL_RTX;
  /* reg_addend is NULL or a multiple of some register.  */
  rtx reg_addend = NULL_RTX;
  /* const_addend is NULL or a const_int.  */
  rtx const_addend = NULL_RTX;
  /* This is the result, or NULL.  */
  rtx result = NULL_RTX;

  x = orig_x;

  if (MEM_P (x))
    x = XEXP (x, 0);

  if (TARGET_64BIT)
    {
      if (GET_CODE (x) == CONST
	  && GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_MODE (XEXP (x, 0)) == Pmode
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == UNSPEC
	  && XINT (XEXP (XEXP (x, 0), 0), 1) == UNSPEC_PCREL)
	{
	  rtx x2 = XVECEXP (XEXP (XEXP (x, 0), 0), 0, 0);
	  x = gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 1), x2);
	  if (MEM_P (orig_x))
	    x = replace_equiv_address_nv (orig_x, x);
	  return x;
	}
      if (GET_CODE (x) != CONST
	  || GET_CODE (XEXP (x, 0)) != UNSPEC
	  || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
	      && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
	  || (!MEM_P (orig_x) && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL))
	return ix86_delegitimize_tls_address (orig_x);
      x = XVECEXP (XEXP (x, 0), 0, 0);
      if (GET_MODE (orig_x) != GET_MODE (x) && MEM_P (orig_x))
	{
	  x = simplify_gen_subreg (GET_MODE (orig_x), x,
				   GET_MODE (x), 0);
	  if (x == NULL_RTX)
	    return orig_x;
	}
      return x;
    }

  if (GET_CODE (x) != PLUS
      || GET_CODE (XEXP (x, 1)) != CONST)
    return ix86_delegitimize_tls_address (orig_x);

  if (ix86_pic_register_p (XEXP (x, 0)))
    /* %ebx + GOT/GOTOFF */
    ;
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      /* %ebx + %reg * scale + GOT/GOTOFF */
      reg_addend = XEXP (x, 0);
      if (ix86_pic_register_p (XEXP (reg_addend, 0)))
	reg_addend = XEXP (reg_addend, 1);
      else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
	reg_addend = XEXP (reg_addend, 0);
      else
	{
	  reg_addend = NULL_RTX;
	  addend = XEXP (x, 0);
	}
    }
  else
    addend = XEXP (x, 0);

  x = XEXP (XEXP (x, 1), 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    {
      const_addend = XEXP (x, 1);
      x = XEXP (x, 0);
    }

  if (GET_CODE (x) == UNSPEC
      && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
	  || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
    result = XVECEXP (x, 0, 0);

  if (TARGET_MACHO && darwin_local_data_pic (x)
      && !MEM_P (orig_x))
    result = XVECEXP (x, 0, 0);

  if (! result)
    return ix86_delegitimize_tls_address (orig_x);

  if (const_addend)
    result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
  if (reg_addend)
    result = gen_rtx_PLUS (Pmode, reg_addend, result);
  if (addend)
    {
      /* If the rest of original X doesn't involve the PIC register, add
	 addend and subtract pic_offset_table_rtx.  This can happen e.g.
	 for code like:
	 leal (%ebx, %ecx, 4), %ecx
	 ...
	 movl foo@GOTOFF(%ecx), %edx
	 in which case we return (%ecx - %ebx) + foo.  */
      if (pic_offset_table_rtx)
	result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
						     pic_offset_table_rtx),
			       result);
      else
	return orig_x;
    }
  if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
    {
      result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
      if (result == NULL_RTX)
	return orig_x;
    }
  return result;
}
/* If X is a machine specific address (i.e. a symbol or label being
   referenced as a displacement from the GOT implemented using an
   UNSPEC), then return the base term.  Otherwise return X.  */

rtx
ix86_find_base_term (rtx x)
{
  rtx term;

  if (TARGET_64BIT)
    {
      if (GET_CODE (x) != CONST)
	return x;
      term = XEXP (x, 0);
      if (GET_CODE (term) == PLUS
	  && (CONST_INT_P (XEXP (term, 1))
	      || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
	term = XEXP (term, 0);
      if (GET_CODE (term) != UNSPEC
	  || (XINT (term, 1) != UNSPEC_GOTPCREL
	      && XINT (term, 1) != UNSPEC_PCREL))
	return x;

      return XVECEXP (term, 0, 0);
    }

  return ix86_delegitimize_address (x);
}
static void
put_condition_code (enum rtx_code code, enum machine_mode mode, bool reverse,
		    bool fp, FILE *file)
{
  const char *suffix;

  if (mode == CCFPmode || mode == CCFPUmode)
    {
      code = ix86_fp_compare_code_to_integer (code);
      mode = CCmode;
    }
  if (reverse)
    code = reverse_condition (code);

  switch (code)
    {
    case EQ:
      switch (mode)
	{
	case CCAmode:
	  suffix = "a";
	  break;

	case CCCmode:
	  suffix = "c";
	  break;

	case CCOmode:
	  suffix = "o";
	  break;

	case CCSmode:
	  suffix = "s";
	  break;

	default:
	  suffix = "e";
	}
      break;
    case NE:
      switch (mode)
	{
	case CCAmode:
	  suffix = "na";
	  break;

	case CCCmode:
	  suffix = "nc";
	  break;

	case CCOmode:
	  suffix = "no";
	  break;

	case CCSmode:
	  suffix = "ns";
	  break;

	default:
	  suffix = "ne";
	}
      break;
    case GT:
      gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
      suffix = "g";
      break;
    case GTU:
      /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
	 Those same assemblers have the same but opposite lossage on cmov.  */
      if (mode == CCmode)
	suffix = fp ? "nbe" : "a";
      else if (mode == CCCmode)
	suffix = "b";
      else
	gcc_unreachable ();
      break;
    case LT:
      switch (mode)
	{
	case CCNOmode:
	case CCGOCmode:
	  suffix = "s";
	  break;

	case CCmode:
	case CCGCmode:
	  suffix = "l";
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    case LTU:
      gcc_assert (mode == CCmode || mode == CCCmode);
      suffix = "b";
      break;
    case GE:
      switch (mode)
	{
	case CCNOmode:
	case CCGOCmode:
	  suffix = "ns";
	  break;

	case CCmode:
	case CCGCmode:
	  suffix = "ge";
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    case GEU:
      /* ??? As above.  */
      gcc_assert (mode == CCmode || mode == CCCmode);
      suffix = fp ? "nb" : "ae";
      break;
    case LE:
      gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
      suffix = "le";
      break;
    case LEU:
      /* ??? As above.  */
      if (mode == CCmode)
	suffix = fp ? "na" : "be";
      else if (mode == CCCmode)
	suffix = fp ? "nb" : "ae";
      else
	gcc_unreachable ();
      break;
    case UNORDERED:
      suffix = fp ? "u" : "p";
      break;
    case ORDERED:
      suffix = fp ? "nu" : "np";
      break;
    default:
      gcc_unreachable ();
    }
  fputs (suffix, file);
}
/* Print the name of register X to FILE based on its machine mode and number.
   If CODE is 'w', pretend the mode is HImode.
   If CODE is 'b', pretend the mode is QImode.
   If CODE is 'k', pretend the mode is SImode.
   If CODE is 'q', pretend the mode is DImode.
   If CODE is 'x', pretend the mode is V4SFmode.
   If CODE is 't', pretend the mode is V8SFmode.
   If CODE is 'h', pretend the reg is the 'high' byte register.
   If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
   If CODE is 'd', duplicate the operand for AVX instruction.
 */

void
print_reg (rtx x, int code, FILE *file)
{
  const char *reg;
  unsigned int regno;
  bool duplicated = code == 'd' && TARGET_AVX;

  if (ASSEMBLER_DIALECT == ASM_ATT)
    putc ('%', file);

  if (x == pc_rtx)
    {
      gcc_assert (TARGET_64BIT);
      fputs ("rip", file);
      return;
    }

  regno = true_regnum (x);
  gcc_assert (regno != ARG_POINTER_REGNUM
	      && regno != FRAME_POINTER_REGNUM
	      && regno != FLAGS_REG
	      && regno != FPSR_REG
	      && regno != FPCR_REG);

  if (code == 'w' || MMX_REG_P (x))
    code = 2;
  else if (code == 'b')
    code = 1;
  else if (code == 'k')
    code = 4;
  else if (code == 'q')
    code = 8;
  else if (code == 'y')
    code = 3;
  else if (code == 'h')
    code = 0;
  else if (code == 'x')
    code = 16;
  else if (code == 't')
    code = 32;
  else
    code = GET_MODE_SIZE (GET_MODE (x));

  /* Irritatingly, AMD extended registers use different naming convention
     from the normal registers: "r%d[bwd]"  */
  if (REX_INT_REGNO_P (regno))
    {
      gcc_assert (TARGET_64BIT);
      putc ('r', file);
      fprint_ul (file, regno - FIRST_REX_INT_REG + 8);
      switch (code)
	{
	  case 0:
	    error ("extended registers have no high halves");
	    break;
	  case 1:
	    putc ('b', file);
	    break;
	  case 2:
	    putc ('w', file);
	    break;
	  case 4:
	    putc ('d', file);
	    break;
	  case 8:
	    /* no suffix */
	    break;
	  default:
	    error ("unsupported operand size for extended register");
	    break;
	}
      return;
    }

  reg = NULL;
  switch (code)
    {
    case 3:
      if (STACK_TOP_P (x))
	{
	  reg = "st(0)";
	  break;
	}
      /* FALLTHRU */
    case 8:
    case 4:
    case 12:
      if (! ANY_FP_REG_P (x))
	putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
      /* FALLTHRU */
    case 16:
    case 2:
    normal:
      reg = hi_reg_name[regno];
      break;
    case 1:
      if (regno >= ARRAY_SIZE (qi_reg_name))
	goto normal;
      reg = qi_reg_name[regno];
      break;
    case 0:
      if (regno >= ARRAY_SIZE (qi_high_reg_name))
	goto normal;
      reg = qi_high_reg_name[regno];
      break;
    case 32:
      if (SSE_REG_P (x))
	{
	  gcc_assert (!duplicated);
	  putc ('y', file);
	  fputs (hi_reg_name[regno] + 1, file);
	  return;
	}
      break;
    default:
      gcc_unreachable ();
    }

  fputs (reg, file);
  if (duplicated)
    {
      if (ASSEMBLER_DIALECT == ASM_ATT)
	fprintf (file, ", %%%s", reg);
      else
	fprintf (file, ", %s", reg);
    }
}
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_local_dynamic_base
   pattern.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  return NULL;
}
/* Meaning of CODE:
   L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
   C -- print opcode suffix for set/cmov insn.
   c -- like C, but print reversed condition
   F,f -- likewise, but for floating-point.
   O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
	otherwise nothing
   R -- print the prefix for register names.
   z -- print the opcode suffix for the size of the current operand.
   Z -- likewise, with special suffixes for x87 instructions.
   * -- print a star (in certain assembler syntax)
   A -- print an absolute memory reference.
   E -- print address with DImode register names if TARGET_64BIT.
   w -- print the operand as if it's a "word" (HImode) even if it isn't.
   s -- print a shift double count, followed by the assembler's argument
	delimiter.
   b -- print the QImode name of the register for the indicated operand.
	%b0 would print %al if operands[0] is reg 0.
   w -- likewise, print the HImode name of the register.
   k -- likewise, print the SImode name of the register.
   q -- likewise, print the DImode name of the register.
   x -- likewise, print the V4SFmode name of the register.
   t -- likewise, print the V8SFmode name of the register.
   h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
   y -- print "st(0)" instead of "st" as a register.
   d -- print duplicated register operand for AVX instruction.
   D -- print condition for SSE cmp instruction.
   P -- if PIC, print an @PLT suffix.
   p -- print raw symbol name.
   X -- don't print any sort of PIC '@' suffix for a symbol.
   & -- print some in-use local-dynamic symbol name.
   H -- print a memory address offset by 8; used for sse high-parts
   Y -- print condition for XOP pcom* instruction.
   + -- print a branch hint as 'cs' or 'ds' prefix
   ; -- print a semicolon (after prefixes due to bug in older gas).
   ~ -- print "i" if TARGET_AVX2, "f" otherwise.
   @ -- print a segment register of thread base pointer load
   ^ -- print addr32 prefix if TARGET_64BIT and Pmode != word_mode
 */
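/* For example, if operands[0] is the AX register, the template strings
   "%b0", "%w0", "%k0" and (on 64-bit targets) "%q0" print "%al", "%ax",
   "%eax" and "%rax" respectively in AT&T syntax; Intel syntax uses the
   same names without the '%' prefix.  */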
static void
ix86_print_operand (FILE *file, rtx x, int code)
{
  if (code)
    {
      switch (code)
	{
	case 'A':
	  switch (ASSEMBLER_DIALECT)
	    {
	    case ASM_ATT:
	      putc ('*', file);
	      break;

	    case ASM_INTEL:
	      /* Intel syntax.  For absolute addresses, registers should not
		 be surrounded by braces.  */
	      if (!REG_P (x))
		{
		  putc ('[', file);
		  ix86_print_operand (file, x, 0);
		  putc (']', file);
		  return;
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  ix86_print_operand (file, x, 0);
	  return;

	case 'E':
	  /* Wrap address in an UNSPEC to declare special handling.  */
	  if (TARGET_64BIT)
	    x = gen_rtx_UNSPEC (DImode, gen_rtvec (1, x), UNSPEC_LEA_ADDR);

	  output_address (x);
	  return;
	case 'L':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('l', file);
	  return;

	case 'W':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('w', file);
	  return;

	case 'B':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('b', file);
	  return;

	case 'Q':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('l', file);
	  return;

	case 'S':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('s', file);
	  return;

	case 'T':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('t', file);
	  return;

	case 'O':
#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
	  if (ASSEMBLER_DIALECT != ASM_ATT)
	    return;

	  switch (GET_MODE_SIZE (GET_MODE (x)))
	    {
	    case 2:
	      putc ('w', file);
	      break;

	    case 4:
	      putc ('l', file);
	      break;

	    case 8:
	      putc ('q', file);
	      break;

	    default:
	      output_operand_lossage
		("invalid operand size for operand code 'O'");
	      return;
	    }

	  putc ('.', file);
#endif
	  return;
	case 'z':
	  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
	    {
	      /* Opcodes don't get size suffixes if using Intel opcodes.  */
	      if (ASSEMBLER_DIALECT == ASM_INTEL)
		return;

	      switch (GET_MODE_SIZE (GET_MODE (x)))
		{
		case 1:
		  putc ('b', file);
		  return;
		case 2:
		  putc ('w', file);
		  return;
		case 4:
		  putc ('l', file);
		  return;
		case 8:
		  putc ('q', file);
		  return;
		default:
		  output_operand_lossage
		    ("invalid operand size for operand code 'z'");
		  return;
		}
	    }

	  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	    warning
	      (0, "non-integer operand used with operand code 'z'");
	  /* FALLTHRU */

	case 'Z':
	  /* 387 opcodes don't get size suffixes if using Intel opcodes.  */
	  if (ASSEMBLER_DIALECT == ASM_INTEL)
	    return;

	  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
	    {
	      switch (GET_MODE_SIZE (GET_MODE (x)))
		{
		case 2:
#ifdef HAVE_AS_IX86_FILDS
		  putc ('s', file);
#endif
		  return;
		case 4:
		  putc ('l', file);
		  return;
		case 8:
#ifdef HAVE_AS_IX86_FILDQ
		  putc ('q', file);
#else
		  fputs ("ll", file);
#endif
		  return;
		default:
		  break;
		}
	    }
	  else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	    {
	      /* 387 opcodes don't get size suffixes
		 if the operands are registers.  */
	      if (STACK_REG_P (x))
		return;

	      switch (GET_MODE_SIZE (GET_MODE (x)))
		{
		case 4:
		  putc ('s', file);
		  return;
		case 8:
		  putc ('l', file);
		  return;
		case 12:
		case 16:
		  putc ('t', file);
		  return;
		default:
		  break;
		}
	    }
	  else
	    {
	      output_operand_lossage
		("invalid operand type used with operand code 'Z'");
	      return;
	    }

	  output_operand_lossage
	    ("invalid operand size for operand code 'Z'");
	  return;
	case 's':
	  if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
	    {
	      ix86_print_operand (file, x, 0);
	      fputs (", ", file);
	    }
	  return;
	case 'Y':
	  switch (GET_CODE (x))
	    {
	    case NE:
	      fputs ("neq", file);
	      break;
	    case EQ:
	      fputs ("eq", file);
	      break;
	    case GE:
	    case GEU:
	      fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
	      break;
	    case GT:
	    case GTU:
	      fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
	      break;
	    case LE:
	    case LEU:
	      fputs ("le", file);
	      break;
	    case LT:
	    case LTU:
	      fputs ("lt", file);
	      break;
	    case UNORDERED:
	      fputs ("unord", file);
	      break;
	    case ORDERED:
	      fputs ("ord", file);
	      break;
	    case UNEQ:
	      fputs ("ueq", file);
	      break;
	    case UNGE:
	      fputs ("nlt", file);
	      break;
	    case UNGT:
	      fputs ("nle", file);
	      break;
	    case UNLE:
	      fputs ("ule", file);
	      break;
	    case UNLT:
	      fputs ("ult", file);
	      break;
	    case LTGT:
	      fputs ("une", file);
	      break;
	    default:
	      output_operand_lossage ("operand is not a condition code, "
				      "invalid operand code 'Y'");
	      return;
	    }
	  return;
	case 'D':
	  /* Little bit of braindamage here.  The SSE compare instructions
	     use completely different names for the comparisons than the
	     fp conditional moves do.  */
	  switch (GET_CODE (x))
	    {
	    case UNEQ:
	      if (TARGET_AVX)
		{
		  fputs ("eq_us", file);
		  break;
		}
	    case EQ:
	      fputs ("eq", file);
	      break;
	    case UNLT:
	      if (TARGET_AVX)
		{
		  fputs ("nge", file);
		  break;
		}
	    case LT:
	      fputs ("lt", file);
	      break;
	    case UNLE:
	      if (TARGET_AVX)
		{
		  fputs ("ngt", file);
		  break;
		}
	    case LE:
	      fputs ("le", file);
	      break;
	    case UNORDERED:
	      fputs ("unord", file);
	      break;
	    case NE:
	      if (TARGET_AVX)
		{
		  fputs ("neq_oq", file);
		  break;
		}
	    case LTGT:
	      fputs ("neq", file);
	      break;
	    case GE:
	      if (TARGET_AVX)
		{
		  fputs ("ge", file);
		  break;
		}
	    case UNGE:
	      fputs ("nlt", file);
	      break;
	    case GT:
	      if (TARGET_AVX)
		{
		  fputs ("gt", file);
		  break;
		}
	    case UNGT:
	      fputs ("nle", file);
	      break;
	    case ORDERED:
	      fputs ("ord", file);
	      break;
	    default:
	      output_operand_lossage ("operand is not a condition code, "
				      "invalid operand code 'D'");
	      return;
	    }
	  return;
	case 'F':
	case 'f':
#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('.', file);
#endif
	  /* FALLTHRU */

	case 'C':
	case 'c':
	  if (!COMPARISON_P (x))
	    {
	      output_operand_lossage ("operand is not a condition code, "
				      "invalid operand code '%c'", code);
	      return;
	    }
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)),
			      code == 'c' || code == 'f',
			      code == 'F' || code == 'f',
			      file);
	  return;
	case 'H':
	  if (!offsettable_memref_p (x))
	    {
	      output_operand_lossage ("operand is not an offsettable memory "
				      "reference, invalid operand code 'H'");
	      return;
	    }
	  /* It doesn't actually matter what mode we use here, as we're
	     only going to use this for printing.  */
	  x = adjust_address_nv (x, DImode, 8);
	  break;

	case 'K':
	  gcc_assert (CONST_INT_P (x));

	  if (INTVAL (x) & IX86_HLE_ACQUIRE)
#ifdef HAVE_AS_IX86_HLE
	    fputs ("xacquire ", file);
#else
	    fputs ("\n" ASM_BYTE "0xf2\n\t", file);
#endif
	  else if (INTVAL (x) & IX86_HLE_RELEASE)
#ifdef HAVE_AS_IX86_HLE
	    fputs ("xrelease ", file);
#else
	    fputs ("\n" ASM_BYTE "0xf3\n\t", file);
#endif
	  /* We do not want to print value of the operand.  */
	  return;
	case '*':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('*', file);
	  return;

	case '&':
	  {
	    const char *name = get_some_local_dynamic_name ();
	    if (name == NULL)
	      output_operand_lossage ("'%%&' used without any "
				      "local dynamic TLS references");
	    else
	      assemble_name (file, name);
	    return;
	  }
	case '+':
	  {
	    rtx x;

	    if (!optimize
		|| optimize_function_for_size_p (cfun)
		|| !TARGET_BRANCH_PREDICTION_HINTS)
	      return;

	    x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
	    if (x)
	      {
		int pred_val = INTVAL (XEXP (x, 0));

		if (pred_val < REG_BR_PROB_BASE * 45 / 100
		    || pred_val > REG_BR_PROB_BASE * 55 / 100)
		  {
		    bool taken = pred_val > REG_BR_PROB_BASE / 2;
		    bool cputaken
		      = final_forward_branch_p (current_output_insn) == 0;

		    /* Emit hints only in the case default branch prediction
		       heuristics would fail.  */
		    if (taken != cputaken)
		      {
			/* We use 3e (DS) prefix for taken branches and
			   2e (CS) prefix for not taken branches.  */
			if (taken)
			  fputs ("ds ; ", file);
			else
			  fputs ("cs ; ", file);
		      }
		  }
	      }
	    return;
	  }
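	/* Worked example for '+': REG_BR_PROB_BASE is 10000, so a hint is
	   only considered when the prediction leaves the 45%-55% window,
	   i.e. pred_val < 4500 or pred_val > 5500.  A backward branch with
	   pred_val == 8000 matches what the CPU's static heuristic already
	   assumes (backward taken), so nothing is printed; a forward branch
	   with the same note gets the 0x3e (DS) "taken" prefix.  */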
	case ';':
#ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
	  putc (';', file);
#endif
	  return;

	case '@':
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('%', file);

	  /* The kernel uses a different segment register for performance
	     reasons; a system call would not have to trash the userspace
	     segment register, which would be expensive.  */
	  if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
	    fputs ("fs", file);
	  else
	    fputs ("gs", file);
	  return;

	case '~':
	  putc (TARGET_AVX2 ? 'i' : 'f', file);
	  return;

	case '^':
	  if (TARGET_64BIT && Pmode != word_mode)
	    fputs ("addr32 ", file);
	  return;

	default:
	  output_operand_lossage ("invalid operand code '%c'", code);
	}
    }
  if (REG_P (x))
    print_reg (x, code, file);

  else if (MEM_P (x))
    {
      /* No `byte ptr' prefix for call instructions or BLKmode operands.  */
      if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
	  && GET_MODE (x) != BLKmode)
	{
	  const char *size;
	  switch (GET_MODE_SIZE (GET_MODE (x)))
	    {
	    case 1: size = "BYTE"; break;
	    case 2: size = "WORD"; break;
	    case 4: size = "DWORD"; break;
	    case 8: size = "QWORD"; break;
	    case 12: size = "TBYTE"; break;
	    case 16:
	      if (GET_MODE (x) == XFmode)
		size = "TBYTE";
	      else
		size = "XMMWORD";
	      break;
	    case 32: size = "YMMWORD"; break;
	    default:
	      gcc_unreachable ();
	    }

	  /* Check for explicit size override (codes 'b', 'w', 'k',
	     'q' and 'x')  */
	  if (code == 'b')
	    size = "BYTE";
	  else if (code == 'w')
	    size = "WORD";
	  else if (code == 'k')
	    size = "DWORD";
	  else if (code == 'q')
	    size = "QWORD";
	  else if (code == 'x')
	    size = "XMMWORD";

	  fputs (size, file);
	  fputs (" PTR ", file);
	}

      x = XEXP (x, 0);
      /* Avoid (%rip) for call operands.  */
      if (CONSTANT_ADDRESS_P (x) && code == 'P'
	  && !CONST_INT_P (x))
	output_addr_const (file, x);
      else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
	output_operand_lossage ("invalid constraints for operand");
      else
	output_address (x);
    }
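  /* For instance, a 4-byte memory destination printed as "(%eax)" in
     AT&T syntax comes out as "DWORD PTR [eax]" in Intel syntax, with the
     "DWORD" part chosen by the size table above.  */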
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
    {
      REAL_VALUE_TYPE r;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);

      if (ASSEMBLER_DIALECT == ASM_ATT)
	putc ('$', file);
      /* Sign extend 32bit SFmode immediate to 8 bytes.  */
      if (code == 'q')
	fprintf (file, "0x%08" HOST_LONG_LONG_FORMAT "x",
		 (unsigned long long) (int) l);
      else
	fprintf (file, "0x%08x", (unsigned int) l);
    }

  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
    {
      REAL_VALUE_TYPE r;
      long l[2];

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      if (ASSEMBLER_DIALECT == ASM_ATT)
	putc ('$', file);
      fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
    }

  /* These float cases don't actually occur as immediate operands.  */
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
    {
      char dstr[30];

      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
      fputs (dstr, file);
    }
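  /* E.g. the SFmode constant 1.0 has the target image 0x3f800000 and is
     printed as "$0x3f800000" in AT&T syntax; with code 'q' the 32-bit
     image is first sign-extended to 64 bits.  */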
  else
    {
      /* We have patterns that allow zero sets of memory, for instance.
	 In 64-bit mode, we should probably support all 8-byte vectors,
	 since we can in fact encode that into an immediate.  */
      if (GET_CODE (x) == CONST_VECTOR)
	{
	  gcc_assert (x == CONST0_RTX (GET_MODE (x)));
	  x = const0_rtx;
	}

      if (code != 'P' && code != 'p')
	{
	  if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
	    {
	      if (ASSEMBLER_DIALECT == ASM_ATT)
		putc ('$', file);
	    }
	  else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
		   || GET_CODE (x) == LABEL_REF)
	    {
	      if (ASSEMBLER_DIALECT == ASM_ATT)
		putc ('$', file);
	      else
		fputs ("OFFSET FLAT:", file);
	    }
	}
      if (CONST_INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      else if (flag_pic || MACHOPIC_INDIRECT)
	output_pic_addr_const (file, x, code);
      else
	output_addr_const (file, x);
    }
}
static bool
ix86_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '@' || code == '*' || code == '+' || code == '&'
	  || code == ';' || code == '~' || code == '^');
}
/* Print a memory operand whose address is ADDR.  */

static void
ix86_print_operand_address (FILE *file, rtx addr)
{
  struct ix86_address parts;
  rtx base, index, disp;
  int scale;
  int ok;
  bool vsib = false;
  int code = 0;

  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_VSIBADDR)
    {
      ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
      gcc_assert (parts.index == NULL_RTX);
      parts.index = XVECEXP (addr, 0, 1);
      parts.scale = INTVAL (XVECEXP (addr, 0, 2));
      addr = XVECEXP (addr, 0, 0);
      vsib = true;
    }
  else if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_LEA_ADDR)
    {
      gcc_assert (TARGET_64BIT);
      ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
      code = 'q';
    }
  else
    ok = ix86_decompose_address (addr, &parts);

  gcc_assert (ok);
  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  switch (parts.seg)
    {
    case SEG_DEFAULT:
      break;
    case SEG_FS:
    case SEG_GS:
      if (ASSEMBLER_DIALECT == ASM_ATT)
	putc ('%', file);
      fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
      break;
    default:
      gcc_unreachable ();
    }
  /* Use one byte shorter RIP relative addressing for 64bit mode.  */
  if (TARGET_64BIT && !base && !index)
    {
      rtx symbol = disp;

      if (GET_CODE (disp) == CONST
	  && GET_CODE (XEXP (disp, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
	symbol = XEXP (XEXP (disp, 0), 0);

      if (GET_CODE (symbol) == LABEL_REF
	  || (GET_CODE (symbol) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (symbol) == 0))
	base = pc_rtx;
    }

  if (!base && !index)
    {
      /* Displacement only requires special attention.  */
      if (CONST_INT_P (disp))
	{
	  if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
	    fputs ("ds:", file);
	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
	}
      else if (flag_pic)
	output_pic_addr_const (file, disp, 0);
      else
	output_addr_const (file, disp);
    }
  else
    {
      /* Print SImode register names to force addr32 prefix.  */
      if (SImode_address_operand (addr, VOIDmode))
	{
#ifdef ENABLE_CHECKING
	  gcc_assert (TARGET_64BIT);
	  switch (GET_CODE (addr))
	    {
	    case SUBREG:
	      gcc_assert (GET_MODE (addr) == SImode);
	      gcc_assert (GET_MODE (SUBREG_REG (addr)) == DImode);
	      break;
	    case ZERO_EXTEND:
	    case AND:
	      gcc_assert (GET_MODE (addr) == DImode);
	      break;
	    default:
	      gcc_unreachable ();
	    }
#endif
	  gcc_assert (!code);
	  code = 'k';
	}
14810 && CONST_INT_P (disp
)
14811 && INTVAL (disp
) < -16*1024*1024)
14813 /* X32 runs in 64-bit mode, where displacement, DISP, in
14814 address DISP(%r64), is encoded as 32-bit immediate sign-
14815 extended from 32-bit to 64-bit. For -0x40000300(%r64),
14816 address is %r64 + 0xffffffffbffffd00. When %r64 <
14817 0x40000300, like 0x37ffe064, address is 0xfffffffff7ffdd64,
14818 which is invalid for x32. The correct address is %r64
14819 - 0x40000300 == 0xf7ffdd64. To properly encode
14820 -0x40000300(%r64) for x32, we zero-extend negative
14821 displacement by forcing addr32 prefix which truncates
14822 0xfffffffff7ffdd64 to 0xf7ffdd64. In theory, we should
14823 zero-extend all negative displacements, including -1(%rsp).
14824 However, for small negative displacements, sign-extension
14825 won't cause overflow. We only zero-extend negative
14826 displacements if they < -16*1024*1024, which is also used
14827 to check legitimate address displacements for PIC. */
      if (ASSEMBLER_DIALECT == ASM_ATT)
	{
	  if (disp)
	    {
	      if (flag_pic)
		output_pic_addr_const (file, disp, 0);
	      else if (GET_CODE (disp) == LABEL_REF)
		output_asm_label (disp);
	      else
		output_addr_const (file, disp);
	    }

	  putc ('(', file);
	  if (base)
	    print_reg (base, code, file);
	  if (index)
	    {
	      putc (',', file);
	      print_reg (index, vsib ? 0 : code, file);
	      if (scale != 1 || vsib)
		fprintf (file, ",%d", scale);
	    }
	  putc (')', file);
	}
      else
	{
	  rtx offset = NULL_RTX;

	  if (disp)
	    {
	      /* Pull out the offset of a symbol; print any symbol itself.  */
	      if (GET_CODE (disp) == CONST
		  && GET_CODE (XEXP (disp, 0)) == PLUS
		  && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
		{
		  offset = XEXP (XEXP (disp, 0), 1);
		  disp = gen_rtx_CONST (VOIDmode,
					XEXP (XEXP (disp, 0), 0));
		}

	      if (flag_pic)
		output_pic_addr_const (file, disp, 0);
	      else if (GET_CODE (disp) == LABEL_REF)
		output_asm_label (disp);
	      else if (CONST_INT_P (disp))
		offset = disp;
	      else
		output_addr_const (file, disp);
	    }

	  putc ('[', file);
	  if (base)
	    {
	      print_reg (base, code, file);
	      if (offset)
		{
		  if (INTVAL (offset) >= 0)
		    putc ('+', file);
		  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
		}
	    }
	  else if (offset)
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));

	  if (index)
	    {
	      putc ('+', file);
	      print_reg (index, vsib ? 0 : code, file);
	      if (scale != 1 || vsib)
		fprintf (file, "*%d", scale);
	    }
	  putc (']', file);
	}
    }
}
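/* Putting the pieces together: base %ebx, index %esi, scale 4 and
   displacement 16 print as "16(%ebx,%esi,4)" in AT&T syntax and as
   "[ebx+esi*4+16]" in Intel syntax.  */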
/* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
i386_asm_output_addr_const_extra (FILE *file, rtx x)
{
  rtx op;

  if (GET_CODE (x) != UNSPEC)
    return false;

  op = XVECEXP (x, 0, 0);
  switch (XINT (x, 1))
    {
    case UNSPEC_GOTTPOFF:
      output_addr_const (file, op);
      /* FIXME: This might be @TPOFF in Sun ld.  */
      fputs ("@gottpoff", file);
      break;
    case UNSPEC_TPOFF:
      output_addr_const (file, op);
      fputs ("@tpoff", file);
      break;
    case UNSPEC_NTPOFF:
      output_addr_const (file, op);
      if (TARGET_64BIT)
	fputs ("@tpoff", file);
      else
	fputs ("@ntpoff", file);
      break;
    case UNSPEC_DTPOFF:
      output_addr_const (file, op);
      fputs ("@dtpoff", file);
      break;
    case UNSPEC_GOTNTPOFF:
      output_addr_const (file, op);
      if (TARGET_64BIT)
	fputs (ASSEMBLER_DIALECT == ASM_ATT ?
	       "@gottpoff(%rip)" : "@gottpoff[rip]", file);
      else
	fputs ("@gotntpoff", file);
      break;
    case UNSPEC_INDNTPOFF:
      output_addr_const (file, op);
      fputs ("@indntpoff", file);
      break;
#if TARGET_MACHO
    case UNSPEC_MACHOPIC_OFFSET:
      output_addr_const (file, op);
      putc ('-', file);
      machopic_output_function_base_name (file);
      break;
#endif

    case UNSPEC_STACK_CHECK:
      {
	int offset;

	gcc_assert (flag_split_stack);

#ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
	offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
#else
	gcc_unreachable ();
#endif

	fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
      }
      break;

    default:
      return false;
    }

  return true;
}
/* Split one or more double-mode RTL references into pairs of half-mode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of double-mode RTLs to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

void
split_double_mode (enum machine_mode mode, rtx operands[],
		   int num, rtx lo_half[], rtx hi_half[])
{
  enum machine_mode half_mode;
  unsigned int byte;

  switch (mode)
    {
    case TImode:
      half_mode = DImode;
      break;
    case DImode:
      half_mode = SImode;
      break;
    default:
      gcc_unreachable ();
    }

  byte = GET_MODE_SIZE (half_mode);

  while (num--)
    {
      rtx op = operands[num];

      /* simplify_subreg refuses to split volatile memory addresses,
	 but we still have to handle it.  */
      if (MEM_P (op))
	{
	  lo_half[num] = adjust_address (op, half_mode, 0);
	  hi_half[num] = adjust_address (op, half_mode, byte);
	}
      else
	{
	  lo_half[num] = simplify_gen_subreg (half_mode, op,
					      GET_MODE (op) == VOIDmode
					      ? mode : GET_MODE (op), 0);
	  hi_half[num] = simplify_gen_subreg (half_mode, op,
					      GET_MODE (op) == VOIDmode
					      ? mode : GET_MODE (op), byte);
	}
    }
}
/* Output code to perform a 387 binary operation in INSN, one of PLUS,
   MINUS, MULT or DIV.  OPERANDS are the insn operands, where operands[3]
   is the expression of the binary operation.  The output may either be
   emitted here, or returned to the caller, like all output_* functions.

   There is no guarantee that the operands are the same mode, as they
   might be within FLOAT or FLOAT_EXTEND expressions.  */

#ifndef SYSV386_COMPAT
/* Set to 1 for compatibility with brain-damaged assemblers.  No-one
   wants to fix the assemblers because that causes incompatibility
   with gcc.  No-one wants to fix gcc because that causes
   incompatibility with assemblers...  You can use the option of
   -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way.  */
#define SYSV386_COMPAT 1
#endif
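/* Concretely: AT&T-derived assemblers swap the meaning of fsub/fsubr
   (and fdiv/fdivr) whenever the destination register is not st(0), so
   the same mnemonic performs the reversed operation there; the
   {att|intel} templates below therefore carry both spellings.  */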
const char *
output_387_binary_op (rtx insn, rtx *operands)
{
  static char buf[40];
  const char *p;
  const char *ssep;
  int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1])
	       || SSE_REG_P (operands[2]);

#ifdef ENABLE_CHECKING
  /* Even if we do not want to check the inputs, this documents input
     constraints.  Which helps in understanding the following code.  */
  if (STACK_REG_P (operands[0])
      && ((REG_P (operands[1])
	   && REGNO (operands[0]) == REGNO (operands[1])
	   && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
	  || (REG_P (operands[2])
	      && REGNO (operands[0]) == REGNO (operands[2])
	      && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
      && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
    ; /* ok */
  else
    gcc_assert (is_sse);
#endif

  switch (GET_CODE (operands[3]))
    {
    case PLUS:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fiadd";
      else
	p = "fadd";
      ssep = "vadd";
      break;

    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fisub";
      else
	p = "fsub";
      ssep = "vsub";
      break;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fimul";
      else
	p = "fmul";
      ssep = "vmul";
      break;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fidiv";
      else
	p = "fdiv";
      ssep = "vdiv";
      break;

    default:
      gcc_unreachable ();
    }
  if (is_sse)
    {
      if (TARGET_AVX)
	{
	  strcpy (buf, ssep);
	  if (GET_MODE (operands[0]) == SFmode)
	    strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
	  else
	    strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
	}
      else
	{
	  strcpy (buf, ssep + 1);
	  if (GET_MODE (operands[0]) == SFmode)
	    strcat (buf, "ss\t{%2, %0|%0, %2}");
	  else
	    strcat (buf, "sd\t{%2, %0|%0, %2}");
	}
      return buf;
    }

  strcpy (buf, p);
  switch (GET_CODE (operands[3]))
    {
    case MULT:
    case PLUS:
      if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
	{
	  rtx temp = operands[2];
	  operands[2] = operands[1];
	  operands[1] = temp;
	}

      /* know operands[0] == operands[1].  */

      if (MEM_P (operands[2]))
	{
	  p = "%Z2\t%2";
	  break;
	}

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
	{
	  if (STACK_TOP_P (operands[0]))
	    /* How is it that we are storing to a dead operand[2]?
	       Well, presumably operands[1] is dead too.  We can't
	       store the result to st(0) as st(0) gets popped on this
	       instruction.  Instead store to operands[2] (which I
	       think has to be st(1)).  st(1) will be popped later.
	       gcc <= 2.8.1 didn't have this check and generated
	       assembly code that the Unixware assembler rejected.  */
	    p = "p\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	  else
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
	  break;
	}

      if (STACK_TOP_P (operands[0]))
	p = "\t{%y2, %0|%0, %y2}";	/* st(0) = st(0) op st(r2) */
      else
	p = "\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0) */
      break;

    case MINUS:
    case DIV:
      if (MEM_P (operands[1]))
	{
	  p = "r%Z1\t%1";
	  break;
	}

      if (MEM_P (operands[2]))
	{
	  p = "%Z2\t%2";
	  break;
	}

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
	{
#if SYSV386_COMPAT
	  /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
	     derived assemblers, confusingly reverse the direction of
	     the operation for fsub{r} and fdiv{r} when the
	     destination register is not st(0).  The Intel assembler
	     doesn't have this brain damage.  Read !SYSV386_COMPAT to
	     figure out what the hardware really does.  */
	  if (STACK_TOP_P (operands[0]))
	    p = "{p\t%0, %2|rp\t%2, %0}";
	  else
	    p = "{rp\t%2, %0|p\t%0, %2}";
#else
	  if (STACK_TOP_P (operands[0]))
	    /* As above for fmul/fadd, we can't store to st(0).  */
	    p = "rp\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	  else
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
#endif
	  break;
	}

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
	{
#if SYSV386_COMPAT
	  if (STACK_TOP_P (operands[0]))
	    p = "{rp\t%0, %1|p\t%1, %0}";
	  else
	    p = "{p\t%1, %0|rp\t%0, %1}";
#else
	  if (STACK_TOP_P (operands[0]))
	    p = "p\t{%0, %1|%1, %0}";	/* st(1) = st(1) op st(0); pop */
	  else
	    p = "rp\t{%1, %0|%0, %1}";	/* st(r2) = st(0) op st(r2); pop */
#endif
	  break;
	}

      if (STACK_TOP_P (operands[0]))
	{
	  if (STACK_TOP_P (operands[1]))
	    p = "\t{%y2, %0|%0, %y2}";	/* st(0) = st(0) op st(r2) */
	  else
	    p = "r\t{%y1, %0|%0, %y1}";	/* st(0) = st(r1) op st(0) */
	  break;
	}
      else if (STACK_TOP_P (operands[1]))
	{
#if SYSV386_COMPAT
	  p = "{\t%1, %0|r\t%0, %1}";
#else
	  p = "r\t{%1, %0|%0, %1}";	/* st(r2) = st(0) op st(r2) */
#endif
	  break;
	}
      else
	{
#if SYSV386_COMPAT
	  p = "{r\t%2, %0|\t%0, %2}";
#else
	  p = "\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0) */
#endif
	  break;
	}

    default:
      gcc_unreachable ();
    }

  strcat (buf, p);
  return buf;
}
/* Check if a 256bit AVX register is referenced inside of EXP.   */

static int
ix86_check_avx256_register (rtx *pexp, void *data ATTRIBUTE_UNUSED)
{
  rtx exp = *pexp;

  if (GET_CODE (exp) == SUBREG)
    exp = SUBREG_REG (exp);

  if (REG_P (exp)
      && VALID_AVX256_REG_OR_OI_MODE (GET_MODE (exp)))
    return 1;

  return 0;
}
/* Return needed mode for entity in optimize_mode_switching pass.  */

static int
ix86_avx_u128_mode_needed (rtx insn)
{
  if (CALL_P (insn))
    {
      rtx link;

      /* Needed mode is set to AVX_U128_CLEAN if there are
	 no 256bit modes used in function arguments.  */
      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link;
	   link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == USE)
	    {
	      rtx arg = XEXP (XEXP (link, 0), 0);

	      if (ix86_check_avx256_register (&arg, NULL))
		return AVX_U128_ANY;
	    }
	}

      return AVX_U128_CLEAN;
    }

  /* Require DIRTY mode if a 256bit AVX register is referenced.  Hardware
     changes state only when a 256bit register is written to, but we need
     to prevent the compiler from moving optimal insertion point above
     eventual read from 256bit register.  */
  if (for_each_rtx (&PATTERN (insn), ix86_check_avx256_register, NULL))
    return AVX_U128_DIRTY;

  return AVX_U128_ANY;
}
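/* Background: on AVX-capable CPUs, executing a legacy (non-VEX) SSE
   instruction while the upper halves of the ymm registers are dirty
   incurs a costly state transition, so the mode-switching pass tracks
   AVX_U128_DIRTY/AVX_U128_CLEAN and inserts vzeroupper on the
   dirty-to-clean edges.  */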
/* Return mode that i387 must be switched into
   prior to the execution of insn.  */

static int
ix86_i387_mode_needed (int entity, rtx insn)
{
  enum attr_i387_cw mode;

  /* The mode UNINITIALIZED is used to store control word after a
     function call or ASM pattern.  The mode ANY specifies that the
     function has no requirements on the control word and makes no
     changes in the bits we are interested in.  */

  if (CALL_P (insn)
      || (NONJUMP_INSN_P (insn)
	  && (asm_noperands (PATTERN (insn)) >= 0
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
    return I387_CW_UNINITIALIZED;

  if (recog_memoized (insn) < 0)
    return I387_CW_ANY;

  mode = get_attr_i387_cw (insn);

  switch (entity)
    {
    case I387_TRUNC:
      if (mode == I387_CW_TRUNC)
	return mode;
      break;

    case I387_FLOOR:
      if (mode == I387_CW_FLOOR)
	return mode;
      break;

    case I387_CEIL:
      if (mode == I387_CW_CEIL)
	return mode;
      break;

    case I387_MASK_PM:
      if (mode == I387_CW_MASK_PM)
	return mode;
      break;

    default:
      gcc_unreachable ();
    }

  return I387_CW_ANY;
}
/* Return mode that entity must be switched into
   prior to the execution of insn.  */

int
ix86_mode_needed (int entity, rtx insn)
{
  switch (entity)
    {
    case AVX_U128:
      return ix86_avx_u128_mode_needed (insn);
    case I387_TRUNC:
    case I387_FLOOR:
    case I387_CEIL:
    case I387_MASK_PM:
      return ix86_i387_mode_needed (entity, insn);
    default:
      gcc_unreachable ();
    }
  return 0;
}
/* Check if a 256bit AVX register is referenced in stores.   */

static void
ix86_check_avx256_stores (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data)
{
  if (ix86_check_avx256_register (&dest, NULL))
    {
      bool *used = (bool *) data;
      *used = true;
    }
}
/* Calculate mode of upper 128bit AVX registers after the insn.  */

static int
ix86_avx_u128_mode_after (int mode, rtx insn)
{
  rtx pat = PATTERN (insn);

  if (vzeroupper_operation (pat, VOIDmode)
      || vzeroall_operation (pat, VOIDmode))
    return AVX_U128_CLEAN;

  /* We know that state is clean after CALL insn if there are no
     256bit registers used in the function return register.  */
  if (CALL_P (insn))
    {
      bool avx_reg256_found = false;
      note_stores (pat, ix86_check_avx256_stores, &avx_reg256_found);

      if (!avx_reg256_found)
	return AVX_U128_CLEAN;
    }

  /* Otherwise, return current mode.  Remember that if insn
     references AVX 256bit registers, the mode was already changed
     to DIRTY from MODE_NEEDED.  */
  return mode;
}
/* Return the mode that an insn results in.  */

int
ix86_mode_after (int entity, int mode, rtx insn)
{
  switch (entity)
    {
    case AVX_U128:
      return ix86_avx_u128_mode_after (mode, insn);
    case I387_TRUNC:
    case I387_FLOOR:
    case I387_CEIL:
    case I387_MASK_PM:
      return mode;
    default:
      gcc_unreachable ();
    }
}
static int
ix86_avx_u128_mode_entry (void)
{
  tree arg;

  /* Entry mode is set to AVX_U128_DIRTY if there are
     256bit modes used in function arguments.  */
  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = TREE_CHAIN (arg))
    {
      rtx incoming = DECL_INCOMING_RTL (arg);

      if (incoming && ix86_check_avx256_register (&incoming, NULL))
	return AVX_U128_DIRTY;
    }

  return AVX_U128_CLEAN;
}
/* Return a mode that ENTITY is assumed to be
   switched to at function entry.  */

int
ix86_mode_entry (int entity)
{
  switch (entity)
    {
    case AVX_U128:
      return ix86_avx_u128_mode_entry ();
    case I387_TRUNC:
    case I387_FLOOR:
    case I387_CEIL:
    case I387_MASK_PM:
      return I387_CW_ANY;
    default:
      gcc_unreachable ();
    }
}
static int
ix86_avx_u128_mode_exit (void)
{
  rtx reg = crtl->return_rtx;

  /* Exit mode is set to AVX_U128_DIRTY if there are
     256bit modes used in the function return register.  */
  if (reg && ix86_check_avx256_register (&reg, NULL))
    return AVX_U128_DIRTY;

  return AVX_U128_CLEAN;
}
/* Return a mode that ENTITY is assumed to be
   switched to at function exit.  */

int
ix86_mode_exit (int entity)
{
  switch (entity)
    {
    case AVX_U128:
      return ix86_avx_u128_mode_exit ();
    case I387_TRUNC:
    case I387_FLOOR:
    case I387_CEIL:
    case I387_MASK_PM:
      return I387_CW_ANY;
    default:
      gcc_unreachable ();
    }
}
/* Output code to initialize control word copies used by trunc?f?i and
   rounding patterns.  CURRENT_MODE is set to current control word,
   while NEW_MODE is set to new control word.  */

static void
emit_i387_cw_initialization (int mode)
{
  rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
  rtx new_mode;

  enum ix86_stack_slot slot;

  rtx reg = gen_reg_rtx (HImode);

  emit_insn (gen_x86_fnstcw_1 (stored_mode));
  emit_move_insn (reg, copy_rtx (stored_mode));

  if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
      || optimize_function_for_size_p (cfun))
    {
      switch (mode)
	{
	case I387_CW_TRUNC:
	  /* round toward zero (truncate) */
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
	  slot = SLOT_CW_TRUNC;
	  break;

	case I387_CW_FLOOR:
	  /* round down toward -oo */
	  emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
	  slot = SLOT_CW_FLOOR;
	  break;

	case I387_CW_CEIL:
	  /* round up toward +oo */
	  emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
	  slot = SLOT_CW_CEIL;
	  break;

	case I387_CW_MASK_PM:
	  /* mask precision exception for nearbyint() */
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
	  slot = SLOT_CW_MASK_PM;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (mode)
	{
	case I387_CW_TRUNC:
	  /* round toward zero (truncate) */
	  emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
	  slot = SLOT_CW_TRUNC;
	  break;

	case I387_CW_FLOOR:
	  /* round down toward -oo */
	  emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
	  slot = SLOT_CW_FLOOR;
	  break;

	case I387_CW_CEIL:
	  /* round up toward +oo */
	  emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
	  slot = SLOT_CW_CEIL;
	  break;

	case I387_CW_MASK_PM:
	  /* mask precision exception for nearbyint() */
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
	  slot = SLOT_CW_MASK_PM;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (slot < MAX_386_STACK_LOCALS);

  new_mode = assign_386_stack_local (HImode, slot);
  emit_move_insn (new_mode, reg);
}
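/* For reference, bits 10-11 of the x87 control word select the rounding
   mode: 00 = to nearest, 01 = down (floor), 10 = up (ceil),
   11 = toward zero (trunc); hence the 0x0c00 mask and the 0x0400 /
   0x0800 / 0x0c00 values OR'ed in above.  Bit 5 (0x0020) masks the
   precision exception.  */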
/* Emit vzeroupper.  */

void
ix86_avx_emit_vzeroupper (HARD_REG_SET regs_live)
{
  int i;

  /* Cancel automatic vzeroupper insertion if there are
     live call-saved SSE registers at the insertion point.  */

  for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
    if (TEST_HARD_REG_BIT (regs_live, i) && !call_used_regs[i])
      return;

  if (TARGET_64BIT)
    for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
      if (TEST_HARD_REG_BIT (regs_live, i) && !call_used_regs[i])
	return;

  emit_insn (gen_avx_vzeroupper ());
}
/* Generate one or more insns to set ENTITY to MODE.  */

void
ix86_emit_mode_set (int entity, int mode, HARD_REG_SET regs_live)
{
  switch (entity)
    {
    case AVX_U128:
      if (mode == AVX_U128_CLEAN)
	ix86_avx_emit_vzeroupper (regs_live);
      break;
    case I387_TRUNC:
    case I387_FLOOR:
    case I387_CEIL:
    case I387_MASK_PM:
      if (mode != I387_CW_ANY
	  && mode != I387_CW_UNINITIALIZED)
	emit_i387_cw_initialization (mode);
      break;
    default:
      gcc_unreachable ();
    }
}
/* Output code for INSN to convert a float to a signed int.  OPERANDS
   are the insn operands.  The output may be [HSD]Imode and the input
   operand may be [SDX]Fmode.  */

const char *
output_fix_trunc (rtx insn, rtx *operands, bool fisttp)
{
  int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
  int dimode_p = GET_MODE (operands[0]) == DImode;
  int round_mode = get_attr_i387_cw (insn);

  /* Jump through a hoop or two for DImode, since the hardware has no
     non-popping instruction.  We used to do this a different way, but
     that was somewhat fragile and broke with post-reload splitters.  */
  if ((dimode_p || fisttp) && !stack_top_dies)
    output_asm_insn ("fld\t%y1", operands);

  gcc_assert (STACK_TOP_P (operands[1]));
  gcc_assert (MEM_P (operands[0]));
  gcc_assert (GET_MODE (operands[1]) != TFmode);

  if (fisttp)
      output_asm_insn ("fisttp%Z0\t%0", operands);
  else
    {
      if (round_mode != I387_CW_ANY)
	output_asm_insn ("fldcw\t%3", operands);
      if (stack_top_dies || dimode_p)
	output_asm_insn ("fistp%Z0\t%0", operands);
      else
	output_asm_insn ("fist%Z0\t%0", operands);
      if (round_mode != I387_CW_ANY)
	output_asm_insn ("fldcw\t%2", operands);
    }

  return "";
}
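/* A typical non-fisttp emission for a truncating conversion is therefore
   (sketch):

	fldcw	%3	# load the truncating control word
	fistpl	%0	# convert and pop
	fldcw	%2	# restore the original control word  */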
/* Output code for x87 ffreep insn.  The OPNO argument, which may only
   have the values zero or one, indicates the ffreep insn's operand
   from the OPERANDS array.  */

static const char *
output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
{
  if (TARGET_USE_FFREEP)
#ifdef HAVE_AS_IX86_FFREEP
    return opno ? "ffreep\t%y1" : "ffreep\t%y0";
#else
    {
      static char retval[32];
      int regno = REGNO (operands[opno]);

      gcc_assert (STACK_REGNO_P (regno));

      regno -= FIRST_STACK_REG;

      snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
      return retval;
    }
#endif

  return opno ? "fstp\t%y1" : "fstp\t%y0";
}
/* Output code for INSN to compare OPERANDS.  EFLAGS_P is 1 when fcomi
   should be used.  UNORDERED_P is true when fucom should be used.  */

const char *
output_fp_compare (rtx insn, rtx *operands, bool eflags_p, bool unordered_p)
{
  int stack_top_dies;
  rtx cmp_op0, cmp_op1;
  int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);

  if (eflags_p)
    {
      cmp_op0 = operands[0];
      cmp_op1 = operands[1];
    }
  else
    {
      cmp_op0 = operands[1];
      cmp_op1 = operands[2];
    }

  if (is_sse)
    {
      if (GET_MODE (operands[0]) == SFmode)
	if (unordered_p)
	  return "%vucomiss\t{%1, %0|%0, %1}";
	else
	  return "%vcomiss\t{%1, %0|%0, %1}";
      else
	if (unordered_p)
	  return "%vucomisd\t{%1, %0|%0, %1}";
	else
	  return "%vcomisd\t{%1, %0|%0, %1}";
    }

  gcc_assert (STACK_TOP_P (cmp_op0));

  stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;

  if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
    {
      if (stack_top_dies)
	{
	  output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
	  return output_387_ffreep (operands, 1);
	}
      else
	return "ftst\n\tfnstsw\t%0";
    }

  if (STACK_REG_P (cmp_op1)
      && stack_top_dies
      && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
      && REGNO (cmp_op1) != FIRST_STACK_REG)
    {
      /* If the top of the 387 stack dies, and the other operand is
	 also a stack register that dies, then this must be a
	 `fcompp' float compare.  */

      if (eflags_p)
	{
	  /* There is no double popping fcomi variant.  Fortunately,
	     eflags is immune from the fstp's cc clobbering.  */
	  if (unordered_p)
	    output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
	  else
	    output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
	  return output_387_ffreep (operands, 0);
	}
      else
	{
	  if (unordered_p)
	    return "fucompp\n\tfnstsw\t%0";
	  else
	    return "fcompp\n\tfnstsw\t%0";
	}
    }
  else
    {
      /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies.  */

      static const char * const alt[16] =
      {
	"fcom%Z2\t%y2\n\tfnstsw\t%0",
	"fcomp%Z2\t%y2\n\tfnstsw\t%0",
	"fucom%Z2\t%y2\n\tfnstsw\t%0",
	"fucomp%Z2\t%y2\n\tfnstsw\t%0",

	"ficom%Z2\t%y2\n\tfnstsw\t%0",
	"ficomp%Z2\t%y2\n\tfnstsw\t%0",
	NULL,
	NULL,

	"fcomi\t{%y1, %0|%0, %y1}",
	"fcomip\t{%y1, %0|%0, %y1}",
	"fucomi\t{%y1, %0|%0, %y1}",
	"fucomip\t{%y1, %0|%0, %y1}",

	NULL,
	NULL,
	NULL,
	NULL
      };

      int mask;
      const char *ret;

      mask  = eflags_p << 3;
      mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
      mask |= unordered_p << 1;
      mask |= stack_top_dies;

      gcc_assert (mask < 16);
      ret = alt[mask];
      gcc_assert (ret);

      return ret;
    }
}
void
ix86_output_addr_vec_elt (FILE *file, int value)
{
  const char *directive = ASM_LONG;

#ifdef ASM_QUAD
  if (TARGET_64BIT)
    directive = ASM_QUAD;
#else
  gcc_assert (!TARGET_64BIT);
#endif

  fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
}
void
ix86_output_addr_diff_elt (FILE *file, int value, int rel)
{
  const char *directive = ASM_LONG;

#ifdef ASM_QUAD
  if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
    directive = ASM_QUAD;
#else
  gcc_assert (!TARGET_64BIT);
#endif
  /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand.  */
  if (TARGET_64BIT || TARGET_VXWORKS_RTP)
    fprintf (file, "%s%s%d-%s%d\n",
	     directive, LPREFIX, value, LPREFIX, rel);
  else if (HAVE_AS_GOTOFF_IN_DATA)
    fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
#if TARGET_MACHO
  else if (TARGET_MACHO)
    {
      fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
      machopic_output_function_base_name (file);
      putc ('\n', file);
    }
#endif
  else
    asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
		 GOT_SYMBOL_NAME, LPREFIX, value);
}
/* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
   for the target.  */

void
ix86_expand_clear (rtx dest)
{
  rtx tmp;

  /* We play register width games, which are only valid after reload.  */
  gcc_assert (reload_completed);

  /* Avoid HImode and its attendant prefix byte.  */
  if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
    dest = gen_rtx_REG (SImode, REGNO (dest));
  tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);

  /* This predicate should match that for movsi_xor and movdi_xor_rex64.  */
  if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
    {
      rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
    }

  emit_insn (tmp);
}
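/* The CLOBBER matters because "xor %eax, %eax" sets the flags while
   "mov $0, %eax" does not; attaching (clobber (reg:CC FLAGS_REG)) lets
   the xor form be chosen without lying to the dataflow analysis.  */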
/* X is an unchanging MEM.  If it is a constant pool reference, return
   the constant pool rtx, else NULL.  */

rtx
maybe_get_pool_constant (rtx x)
{
  x = ix86_delegitimize_address (XEXP (x, 0));

  if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
    return get_pool_constant (x);

  return NULL_RTX;
}
void
ix86_expand_move (enum machine_mode mode, rtx operands[])
{
  rtx op0, op1;
  enum tls_model model;

  op0 = operands[0];
  op1 = operands[1];

  if (GET_CODE (op1) == SYMBOL_REF)
    {
      model = SYMBOL_REF_TLS_MODEL (op1);
      if (model)
	{
	  op1 = legitimize_tls_address (op1, model, true);
	  op1 = force_operand (op1, op0);
	  if (op1 == op0)
	    return;
	  if (GET_MODE (op1) != mode)
	    op1 = convert_to_mode (mode, op1, 1);
	}
      else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
	       && SYMBOL_REF_DLLIMPORT_P (op1))
	op1 = legitimize_dllimport_symbol (op1, false);
    }
  else if (GET_CODE (op1) == CONST
	   && GET_CODE (XEXP (op1, 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
    {
      rtx addend = XEXP (XEXP (op1, 0), 1);
      rtx symbol = XEXP (XEXP (op1, 0), 0);
      rtx tmp = NULL;

      model = SYMBOL_REF_TLS_MODEL (symbol);
      if (model)
	tmp = legitimize_tls_address (symbol, model, true);
      else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
	       && SYMBOL_REF_DLLIMPORT_P (symbol))
	tmp = legitimize_dllimport_symbol (symbol, true);

      if (tmp)
	{
	  tmp = force_operand (tmp, NULL);
	  tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
				     op0, 1, OPTAB_DIRECT);
	  if (tmp == op0)
	    return;
	  if (GET_MODE (tmp) != mode)
	    op1 = convert_to_mode (mode, tmp, 1);
	}
    }
  if ((flag_pic || MACHOPIC_INDIRECT)
      && symbolic_operand (op1, mode))
    {
      if (TARGET_MACHO && !TARGET_64BIT)
	{
#if TARGET_MACHO
	  /* dynamic-no-pic */
	  if (MACHOPIC_INDIRECT)
	    {
	      rtx temp = ((reload_in_progress
			   || ((op0 && REG_P (op0))
			       && mode == Pmode))
			  ? op0 : gen_reg_rtx (Pmode));
	      op1 = machopic_indirect_data_reference (op1, temp);
	      if (MACHOPIC_PURE)
		op1 = machopic_legitimize_pic_address (op1, mode,
						       temp == op1 ? 0 : temp);
	    }
	  if (op0 != op1 && GET_CODE (op0) != MEM)
	    {
	      rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
	      emit_insn (insn);
	      return;
	    }
	  if (GET_CODE (op0) == MEM)
	    op1 = force_reg (Pmode, op1);
	  else
	    {
	      rtx temp = op0;
	      if (GET_CODE (temp) != REG)
		temp = gen_reg_rtx (Pmode);
	      temp = legitimize_pic_address (op1, temp);
	      if (temp == op0)
		return;
	      op1 = temp;
	    }
	  /* dynamic-no-pic */
#endif
	}
      else
	{
	  if (MEM_P (op0))
	    op1 = force_reg (mode, op1);
	  else if (!(TARGET_64BIT && x86_64_movabs_operand (op1, DImode)))
	    {
	      rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
	      op1 = legitimize_pic_address (op1, reg);
	      if (op0 == op1)
		return;
	      if (GET_MODE (op1) != mode)
		op1 = convert_to_mode (mode, op1, 1);
	    }
	}
    }
16040 && (PUSH_ROUNDING (GET_MODE_SIZE (mode
)) != GET_MODE_SIZE (mode
)
16041 || !push_operand (op0
, mode
))
16043 op1
= force_reg (mode
, op1
);
16045 if (push_operand (op0
, mode
)
16046 && ! general_no_elim_operand (op1
, mode
))
16047 op1
= copy_to_mode_reg (mode
, op1
);
16049 /* Force large constants in 64bit compilation into register
16050 to get them CSEed. */
16051 if (can_create_pseudo_p ()
16052 && (mode
== DImode
) && TARGET_64BIT
16053 && immediate_operand (op1
, mode
)
16054 && !x86_64_zext_immediate_operand (op1
, VOIDmode
)
16055 && !register_operand (op0
, mode
)
16057 op1
= copy_to_mode_reg (mode
, op1
);
16059 if (can_create_pseudo_p ()
16060 && FLOAT_MODE_P (mode
)
16061 && GET_CODE (op1
) == CONST_DOUBLE
)
16063 /* If we are loading a floating point constant to a register,
16064 force the value to memory now, since we'll get better code
16065 out the back end. */
16067 op1
= validize_mem (force_const_mem (mode
, op1
));
16068 if (!register_operand (op0
, mode
))
16070 rtx temp
= gen_reg_rtx (mode
);
16071 emit_insn (gen_rtx_SET (VOIDmode
, temp
, op1
));
16072 emit_move_insn (op0
, temp
);
16078 emit_insn (gen_rtx_SET (VOIDmode
, op0
, op1
));
void
ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];
  unsigned int align = GET_MODE_ALIGNMENT (mode);

  /* Force constants other than zero into memory.  We do not know how
     the instructions used to build constants modify the upper 64 bits
     of the register, once we have that information we may be able
     to handle some of them more efficiently.  */
  if (can_create_pseudo_p ()
      && register_operand (op0, mode)
      && (CONSTANT_P (op1)
	  || (GET_CODE (op1) == SUBREG
	      && CONSTANT_P (SUBREG_REG (op1))))
      && !standard_sse_constant_p (op1))
    op1 = validize_mem (force_const_mem (mode, op1));

  /* We need to check memory alignment for SSE mode since attribute
     can make operands unaligned.  */
  if (can_create_pseudo_p ()
      && SSE_REG_MODE_P (mode)
      && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
	  || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
    {
      rtx tmp[2];

      /* ix86_expand_vector_move_misalign() does not like constants ... */
      if (CONSTANT_P (op1)
	  || (GET_CODE (op1) == SUBREG
	      && CONSTANT_P (SUBREG_REG (op1))))
	op1 = validize_mem (force_const_mem (mode, op1));

      /* ... nor both arguments in memory.  */
      if (!register_operand (op0, mode)
	  && !register_operand (op1, mode))
	op1 = force_reg (mode, op1);

      tmp[0] = op0; tmp[1] = op1;
      ix86_expand_vector_move_misalign (mode, tmp);
      return;
    }

  /* Make operand1 a register if it isn't already.  */
  if (can_create_pseudo_p ()
      && !register_operand (op0, mode)
      && !register_operand (op1, mode))
    {
      emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
      return;
    }

  emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
}
/* Split 32-byte AVX unaligned load and store if needed.  */

static void
ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
{
  rtx m;
  rtx (*extract) (rtx, rtx, rtx);
  rtx (*load_unaligned) (rtx, rtx);
  rtx (*store_unaligned) (rtx, rtx);
  enum machine_mode mode;

  switch (GET_MODE (op0))
    {
    default:
      gcc_unreachable ();
    case V32QImode:
      extract = gen_avx_vextractf128v32qi;
      load_unaligned = gen_avx_loaddqu256;
      store_unaligned = gen_avx_storedqu256;
      mode = V16QImode;
      break;
    case V8SFmode:
      extract = gen_avx_vextractf128v8sf;
      load_unaligned = gen_avx_loadups256;
      store_unaligned = gen_avx_storeups256;
      mode = V4SFmode;
      break;
    case V4DFmode:
      extract = gen_avx_vextractf128v4df;
      load_unaligned = gen_avx_loadupd256;
      store_unaligned = gen_avx_storeupd256;
      mode = V2DFmode;
      break;
    }

  if (MEM_P (op1))
    {
      if (TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
	{
	  rtx r = gen_reg_rtx (mode);
	  m = adjust_address (op1, mode, 0);
	  emit_move_insn (r, m);
	  m = adjust_address (op1, mode, 16);
	  r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
	  emit_move_insn (op0, r);
	}
      else
	emit_insn (load_unaligned (op0, op1));
    }
  else if (MEM_P (op0))
    {
      if (TARGET_AVX256_SPLIT_UNALIGNED_STORE)
	{
	  m = adjust_address (op0, mode, 0);
	  emit_insn (extract (m, op1, const0_rtx));
	  m = adjust_address (op0, mode, 16);
	  emit_insn (extract (m, op1, const1_rtx));
	}
      else
	emit_insn (store_unaligned (op0, op1));
    }
  else
    gcc_unreachable ();
}
/* Implement the movmisalign patterns for SSE.  Non-SSE modes go
   straight to ix86_expand_vector_move.  */
/* Code generation for scalar reg-reg moves of single and double precision data:
     if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
       movaps reg, reg
     else
       movss reg, reg
     if (x86_sse_partial_reg_dependency == true)
       movapd reg, reg
     else
       movsd reg, reg

   Code generation for scalar loads of double precision data:
     if (x86_sse_split_regs == true)
       movlpd mem, reg      (gas syntax)
     else
       movsd mem, reg

   Code generation for unaligned packed loads of single precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
     if (x86_sse_unaligned_move_optimal)
       movups mem, reg

     if (x86_sse_partial_reg_dependency == true)
       {
	 xorps  reg, reg
	 movlps mem, reg
	 movhps mem+8, reg
       }
     else
       {
	 movlps mem, reg
	 movhps mem+8, reg
       }

   Code generation for unaligned packed loads of double precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
     if (x86_sse_unaligned_move_optimal)
       movupd mem, reg

     if (x86_sse_split_regs == true)
       {
	 movlpd mem, reg
	 movhpd mem+8, reg
       }
     else
       {
	 movsd  mem, reg
	 unpcklpd reg, reg
       }   */
void
ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
{
  rtx op0, op1, m;

  op0 = operands[0];
  op1 = operands[1];

  if (TARGET_AVX
      && GET_MODE_SIZE (mode) == 32)
    {
      switch (GET_MODE_CLASS (mode))
	{
	case MODE_VECTOR_INT:
	case MODE_INT:
	  op0 = gen_lowpart (V32QImode, op0);
	  op1 = gen_lowpart (V32QImode, op1);
	  /* FALLTHRU */

	case MODE_VECTOR_FLOAT:
	  ix86_avx256_split_vector_move_misalign (op0, op1);
	  break;

	default:
	  gcc_unreachable ();
	}

      return;
    }
  if (MEM_P (op1))
    {
      /* ??? If we have typed data, then it would appear that using
	 movdqu is the only way to get unaligned data loaded with
	 integer type.  */
      if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  op0 = gen_lowpart (V16QImode, op0);
	  op1 = gen_lowpart (V16QImode, op1);
	  /* We will eventually emit movups based on insn attributes.  */
	  emit_insn (gen_sse2_loaddqu (op0, op1));
	}
      else if (TARGET_SSE2 && mode == V2DFmode)
	{
	  rtx zero;

	  if (TARGET_AVX
	      || TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
	      || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
	      || optimize_function_for_size_p (cfun))
	    {
	      /* We will eventually emit movups based on insn attributes.  */
	      emit_insn (gen_sse2_loadupd (op0, op1));
	      return;
	    }

	  /* When SSE registers are split into halves, we can avoid
	     writing to the top half twice.  */
	  if (TARGET_SSE_SPLIT_REGS)
	    {
	      emit_clobber (op0);
	      zero = op0;
	    }
	  else
	    {
	      /* ??? Not sure about the best option for the Intel chips.
		 The following would seem to satisfy; the register is
		 entirely cleared, breaking the dependency chain.  We
		 then store to the upper half, with a dependency depth
		 of one.  A rumor has it that Intel recommends two movsd
		 followed by an unpacklpd, but this is unconfirmed.  And
		 given that the dependency depth of the unpacklpd would
		 still be one, I'm not sure why this would be better.  */
	      zero = CONST0_RTX (V2DFmode);
	    }

	  m = adjust_address (op1, DFmode, 0);
	  emit_insn (gen_sse2_loadlpd (op0, zero, m));
	  m = adjust_address (op1, DFmode, 8);
	  emit_insn (gen_sse2_loadhpd (op0, op0, m));
	}
      else
	{
	  if (TARGET_AVX
	      || TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
	      || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
	      || optimize_function_for_size_p (cfun))
	    {
	      op0 = gen_lowpart (V4SFmode, op0);
	      op1 = gen_lowpart (V4SFmode, op1);
	      emit_insn (gen_sse_loadups (op0, op1));
	      return;
	    }

	  if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
	    emit_move_insn (op0, CONST0_RTX (mode));
	  else
	    emit_clobber (op0);

	  if (mode != V4SFmode)
	    op0 = gen_lowpart (V4SFmode, op0);

	  m = adjust_address (op1, V2SFmode, 0);
	  emit_insn (gen_sse_loadlps (op0, op0, m));
	  m = adjust_address (op1, V2SFmode, 8);
	  emit_insn (gen_sse_loadhps (op0, op0, m));
	}
    }
  else if (MEM_P (op0))
    {
      if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  op0 = gen_lowpart (V16QImode, op0);
	  op1 = gen_lowpart (V16QImode, op1);
	  /* We will eventually emit movups based on insn attributes.  */
	  emit_insn (gen_sse2_storedqu (op0, op1));
	}
      else if (TARGET_SSE2 && mode == V2DFmode)
	{
	  if (TARGET_AVX
	      || TARGET_SSE_UNALIGNED_STORE_OPTIMAL
	      || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
	      || optimize_function_for_size_p (cfun))
	    /* We will eventually emit movups based on insn attributes.  */
	    emit_insn (gen_sse2_storeupd (op0, op1));
	  else
	    {
	      m = adjust_address (op0, DFmode, 0);
	      emit_insn (gen_sse2_storelpd (m, op1));
	      m = adjust_address (op0, DFmode, 8);
	      emit_insn (gen_sse2_storehpd (m, op1));
	    }
	}
      else
	{
	  if (mode != V4SFmode)
	    op1 = gen_lowpart (V4SFmode, op1);

	  if (TARGET_AVX
	      || TARGET_SSE_UNALIGNED_STORE_OPTIMAL
	      || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
	      || optimize_function_for_size_p (cfun))
	    {
	      op0 = gen_lowpart (V4SFmode, op0);
	      emit_insn (gen_sse_storeups (op0, op1));
	    }
	  else
	    {
	      m = adjust_address (op0, V2SFmode, 0);
	      emit_insn (gen_sse_storelps (m, op1));
	      m = adjust_address (op0, V2SFmode, 8);
	      emit_insn (gen_sse_storehps (m, op1));
	    }
	}
    }
  else
    gcc_unreachable ();
}
/* Expand a push in MODE.  This is some mode for which we do not support
   proper push instructions, at least from the registers that we expect
   the value to live in.  */

void
ix86_expand_push (enum machine_mode mode, rtx x)
{
  rtx tmp;

  tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
			     GEN_INT (-GET_MODE_SIZE (mode)),
			     stack_pointer_rtx, 1, OPTAB_DIRECT);
  if (tmp != stack_pointer_rtx)
    emit_move_insn (stack_pointer_rtx, tmp);

  tmp = gen_rtx_MEM (mode, stack_pointer_rtx);

  /* When we push an operand onto stack, it has to be aligned at least
     at the function argument boundary.  However since we don't have
     the argument type, we can't determine the actual argument
     boundary.  */
  emit_move_insn (tmp, x);
}
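/* E.g. pushing a TImode value on x86-64 expands to roughly

     sub  $16, %rsp
     mov  ..., (%rsp)

   since there is no 16-byte push from general registers (sketch; the
   actual move sequence depends on where the value lives).  */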
/* Helper function of ix86_fixup_binary_operands to canonicalize
   operand order.  Returns true if the operands should be swapped.  */

static bool
ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
			     rtx operands[])
{
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];

  /* If the operation is not commutative, we can't do anything.  */
  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
    return false;

  /* Highest priority is that src1 should match dst.  */
  if (rtx_equal_p (dst, src1))
    return false;
  if (rtx_equal_p (dst, src2))
    return true;

  /* Next highest priority is that immediate constants come second.  */
  if (immediate_operand (src2, mode))
    return false;
  if (immediate_operand (src1, mode))
    return true;

  /* Lowest priority is that memory references should come second.  */
  if (MEM_P (src2))
    return false;
  if (MEM_P (src1))
    return true;

  return false;
}
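/* Example: for (plus:SI (mem:SI A) (reg:SI B)) with a destination that
   matches neither source, the memory operand is moved second, yielding
   operands (reg B, mem A), so the add can use its reg/mem form.  */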
/* Fix up OPERANDS to satisfy ix86_binary_operator_ok.  Return the
   destination to use for the operation.  If different from the true
   destination in operands[0], a copy operation will be required.  */

rtx
ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
			    rtx operands[])
{
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];

  /* Canonicalize operand order.  */
  if (ix86_swap_binary_operands_p (code, mode, operands))
    {
      rtx temp;

      /* It is invalid to swap operands of different modes.  */
      gcc_assert (GET_MODE (src1) == GET_MODE (src2));

      temp = src1;
      src1 = src2;
      src2 = temp;
    }

  /* Both source operands cannot be in memory.  */
  if (MEM_P (src1) && MEM_P (src2))
    {
      /* Optimization: Only read from memory once.  */
      if (rtx_equal_p (src1, src2))
	{
	  src2 = force_reg (mode, src2);
	  src1 = src2;
	}
      else
	src2 = force_reg (mode, src2);
    }

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  if (MEM_P (dst) && !rtx_equal_p (dst, src1))
    dst = gen_reg_rtx (mode);

  /* Source 1 cannot be a constant.  */
  if (CONSTANT_P (src1))
    src1 = force_reg (mode, src1);

  /* Source 1 cannot be a non-matching memory.  */
  if (MEM_P (src1) && !rtx_equal_p (dst, src1))
    src1 = force_reg (mode, src1);

  /* Improve address combine.  */
  if (code == PLUS
      && GET_MODE_CLASS (mode) == MODE_INT
      && MEM_P (src2))
    src2 = force_reg (mode, src2);

  operands[1] = src1;
  operands[2] = src2;
  return dst;
}
/* Similarly, but assume that the destination has already been
   set up properly.  */

void
ix86_fixup_binary_operands_no_copy (enum rtx_code code,
				    enum machine_mode mode, rtx operands[])
{
  rtx dst = ix86_fixup_binary_operands (code, mode, operands);
  gcc_assert (dst == operands[0]);
}
/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine, then just general_operand, which will allow 3 separate
   memory references (one output, two input) in a single insn.  */

void
ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
			     rtx operands[])
{
  rtx src1, src2, dst, op, clob;

  dst = ix86_fixup_binary_operands (code, mode, operands);
  src1 = operands[1];
  src2 = operands[2];

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
  if (reload_in_progress)
    {
      /* Reload doesn't know about the flags register, and doesn't know that
	 it doesn't want to clobber it.  We can only do this with PLUS.  */
      gcc_assert (code == PLUS);
      emit_insn (op);
    }
  else if (reload_completed
	   && code == PLUS
	   && !rtx_equal_p (dst, src1))
    {
      /* This is going to be an LEA; avoid splitting it later.  */
      emit_insn (op);
    }
  else
    {
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
    }

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
}
/* Expand vector logical operation CODE (AND, IOR, XOR) in MODE with
   the given OPERANDS.  */

void
ix86_expand_vector_logical_operator (enum rtx_code code, enum machine_mode mode,
				     rtx operands[])
{
  rtx op1 = NULL_RTX, op2 = NULL_RTX;
  if (GET_CODE (operands[1]) == SUBREG)
    {
      op1 = operands[1];
      op2 = operands[2];
    }
  else if (GET_CODE (operands[2]) == SUBREG)
    {
      op1 = operands[2];
      op2 = operands[1];
    }
  /* Optimize (__m128i) d | (__m128i) e and similar code
     when d and e are float vectors into float vector logical
     insn.  In C/C++ without using intrinsics there is no other way
     to express vector logical operation on float vectors than
     to cast them temporarily to integer vectors.  */
  if (op1
      && !TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
      && ((GET_CODE (op2) == SUBREG || GET_CODE (op2) == CONST_VECTOR))
      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op1))) == MODE_VECTOR_FLOAT
      && GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))) == GET_MODE_SIZE (mode)
      && SUBREG_BYTE (op1) == 0
      && (GET_CODE (op2) == CONST_VECTOR
	  || (GET_MODE (SUBREG_REG (op1)) == GET_MODE (SUBREG_REG (op2))
	      && SUBREG_BYTE (op2) == 0))
      && can_create_pseudo_p ())
    {
      rtx dst;
      switch (GET_MODE (SUBREG_REG (op1)))
	{
	case V4SFmode:
	case V8SFmode:
	case V2DFmode:
	case V4DFmode:
	  dst = gen_reg_rtx (GET_MODE (SUBREG_REG (op1)));
	  if (GET_CODE (op2) == CONST_VECTOR)
	    {
	      op2 = gen_lowpart (GET_MODE (dst), op2);
	      op2 = force_reg (GET_MODE (dst), op2);
	    }
	  else
	    {
	      op1 = operands[1];
	      op2 = SUBREG_REG (operands[2]);
	      if (!nonimmediate_operand (op2, GET_MODE (dst)))
		op2 = force_reg (GET_MODE (dst), op2);
	    }
	  op1 = SUBREG_REG (op1);
	  if (!nonimmediate_operand (op1, GET_MODE (dst)))
	    op1 = force_reg (GET_MODE (dst), op1);
	  emit_insn (gen_rtx_SET (VOIDmode, dst,
				  gen_rtx_fmt_ee (code, GET_MODE (dst),
						  op1, op2)));
	  emit_move_insn (operands[0], gen_lowpart (mode, dst));
	  return;
	default:
	  break;
	}
    }
  if (!nonimmediate_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);
  if (!nonimmediate_operand (operands[2], mode))
    operands[2] = force_reg (mode, operands[2]);
  ix86_fixup_binary_operands_no_copy (code, mode, operands);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
			  gen_rtx_fmt_ee (code, mode, operands[1],
					  operands[2])));
}
/* Return TRUE or FALSE depending on whether the binary operator meets the
   appropriate constraints.  */

bool
ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
			 rtx operands[3])
{
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];

  /* Both source operands cannot be in memory.  */
  if (MEM_P (src1) && MEM_P (src2))
    return false;

  /* Canonicalize operand order for commutative operators.  */
  if (ix86_swap_binary_operands_p (code, mode, operands))
    {
      rtx temp = src1;
      src1 = src2;
      src2 = temp;
    }

  /* If the destination is memory, we must have a matching source operand.  */
  if (MEM_P (dst) && !rtx_equal_p (dst, src1))
    return false;

  /* Source 1 cannot be a constant.  */
  if (CONSTANT_P (src1))
    return false;

  /* Source 1 cannot be a non-matching memory.  */
  if (MEM_P (src1) && !rtx_equal_p (dst, src1))
    /* Support "andhi/andsi/anddi" as a zero-extending move.  */
    return (code == AND
	    && (mode == HImode
		|| mode == SImode
		|| (TARGET_64BIT && mode == DImode))
	    && satisfies_constraint_L (src2));

  return true;
}
/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine, then just general_operand, which will allow 2 separate
   memory references (one output, one input) in a single insn.  */

void
ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
			    rtx operands[])
{
  int matching_memory;
  rtx src, dst, op, clob;

  dst = operands[0];
  src = operands[1];

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
  if (MEM_P (dst))
    {
      if (rtx_equal_p (dst, src))
	matching_memory = 1;
      else
	dst = gen_reg_rtx (mode);
    }

  /* When source operand is memory, destination must match.  */
  if (MEM_P (src) && !matching_memory)
    src = force_reg (mode, src);

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
  if (reload_in_progress || code == NOT)
    {
      /* Reload doesn't know about the flags register, and doesn't know that
	 it doesn't want to clobber it.  */
      gcc_assert (code == NOT);
      emit_insn (op);
    }
  else
    {
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
    }

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
}
/* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
   divisor are within the range [0-255].  */

void
ix86_split_idivmod (enum machine_mode mode, rtx operands[],
		    bool signed_p)
{
  rtx end_label, qimode_label;
  rtx insn, div, mod;
  rtx scratch, tmp0, tmp1, tmp2;
  rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
  rtx (*gen_zero_extend) (rtx, rtx);
  rtx (*gen_test_ccno_1) (rtx, rtx);

  switch (mode)
    {
    case SImode:
      gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
      gen_test_ccno_1 = gen_testsi_ccno_1;
      gen_zero_extend = gen_zero_extendqisi2;
      break;
    case DImode:
      gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
      gen_test_ccno_1 = gen_testdi_ccno_1;
      gen_zero_extend = gen_zero_extendqidi2;
      break;
    default:
      gcc_unreachable ();
    }

  end_label = gen_label_rtx ();
  qimode_label = gen_label_rtx ();

  scratch = gen_reg_rtx (mode);

  /* Use 8bit unsigned divmod if dividend and divisor are within
     the range [0-255].  */
  emit_move_insn (scratch, operands[2]);
  scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
				 scratch, 1, OPTAB_DIRECT);
  emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
  tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
  tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
  tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
			       gen_rtx_LABEL_REF (VOIDmode, qimode_label),
			       pc_rtx);
  insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
  predict_jump (REG_BR_PROB_BASE * 50 / 100);
  JUMP_LABEL (insn) = qimode_label;
16807 /* Generate original signed/unsigned divimod. */
16808 div
= gen_divmod4_1 (operands
[0], operands
[1],
16809 operands
[2], operands
[3]);
16812 /* Branch to the end. */
16813 emit_jump_insn (gen_jump (end_label
));
16816 /* Generate 8bit unsigned divide. */
16817 emit_label (qimode_label
);
16818 /* Don't use operands[0] for result of 8bit divide since not all
16819 registers support QImode ZERO_EXTRACT. */
16820 tmp0
= simplify_gen_subreg (HImode
, scratch
, mode
, 0);
16821 tmp1
= simplify_gen_subreg (HImode
, operands
[2], mode
, 0);
16822 tmp2
= simplify_gen_subreg (QImode
, operands
[3], mode
, 0);
16823 emit_insn (gen_udivmodhiqi3 (tmp0
, tmp1
, tmp2
));
16827 div
= gen_rtx_DIV (SImode
, operands
[2], operands
[3]);
16828 mod
= gen_rtx_MOD (SImode
, operands
[2], operands
[3]);
16832 div
= gen_rtx_UDIV (SImode
, operands
[2], operands
[3]);
16833 mod
= gen_rtx_UMOD (SImode
, operands
[2], operands
[3]);
16836 /* Extract remainder from AH. */
16837 tmp1
= gen_rtx_ZERO_EXTRACT (mode
, tmp0
, GEN_INT (8), GEN_INT (8));
16838 if (REG_P (operands
[1]))
16839 insn
= emit_move_insn (operands
[1], tmp1
);
16842 /* Need a new scratch register since the old one has result
16844 scratch
= gen_reg_rtx (mode
);
16845 emit_move_insn (scratch
, tmp1
);
16846 insn
= emit_move_insn (operands
[1], scratch
);
16848 set_unique_reg_note (insn
, REG_EQUAL
, mod
);
16850 /* Zero extend quotient from AL. */
16851 tmp1
= gen_lowpart (QImode
, tmp0
);
16852 insn
= emit_insn (gen_zero_extend (operands
[0], tmp1
));
16853 set_unique_reg_note (insn
, REG_EQUAL
, div
);
16855 emit_label (end_label
);
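
/* Illustrative sketch of the sequence emitted above (not part of the
   original source; register names are hypothetical) for a 32-bit
   unsigned division of %eax by %ecx:

	mov	dividend, scratch
	or	divisor, scratch
	test	$-0x100, scratch	; any bit above bit 7 set?
	je	.Lqimode		; both values fit in 8 bits
	div	divisor			; full 32-bit divide
	jmp	.Lend
   .Lqimode:
	divb	divisor8		; AL = quotient, AH = remainder
	movzbl	%al, quotient
   .Lend:

   The 50% probability passed to predict_jump reflects that neither
   path is assumed more likely.  */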

#define LEA_MAX_STALL (3)
#define LEA_SEARCH_THRESHOLD (LEA_MAX_STALL << 1)

/* Increase given DISTANCE in half-cycles according to
   dependencies between PREV and NEXT instructions.
   Add 1 half-cycle if there is no dependency and
   go to next cycle if there is some dependency.  */

static unsigned int
increase_distance (rtx prev, rtx next, unsigned int distance)
{
  df_ref *use_rec;
  df_ref *def_rec;

  if (!prev || !next)
    return distance + (distance & 1) + 2;

  if (!DF_INSN_USES (next) || !DF_INSN_DEFS (prev))
    return distance + 1;

  for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
    for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
      if (!DF_REF_IS_ARTIFICIAL (*def_rec)
	  && DF_REF_REGNO (*use_rec) == DF_REF_REGNO (*def_rec))
	return distance + (distance & 1) + 2;

  return distance + 1;
}

/* Function checks if instruction INSN defines register number
   REGNO1 or REGNO2.  */

static bool
insn_defines_reg (unsigned int regno1, unsigned int regno2,
		  rtx insn)
{
  df_ref *def_rec;

  for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
    if (DF_REF_REG_DEF_P (*def_rec)
	&& !DF_REF_IS_ARTIFICIAL (*def_rec)
	&& (regno1 == DF_REF_REGNO (*def_rec)
	    || regno2 == DF_REF_REGNO (*def_rec)))
      return true;

  return false;
}

/* Function checks if instruction INSN uses register number
   REGNO as a part of address expression.  */

static bool
insn_uses_reg_mem (unsigned int regno, rtx insn)
{
  df_ref *use_rec;

  for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
    if (DF_REF_REG_MEM_P (*use_rec) && regno == DF_REF_REGNO (*use_rec))
      return true;

  return false;
}

/* Search backward for non-agu definition of register number REGNO1
   or register number REGNO2 in basic block starting from instruction
   START up to head of basic block or instruction INSN.

   Function puts true value into *FOUND var if definition was found
   and false otherwise.

   Distance in half-cycles between START and found instruction or head
   of BB is added to DISTANCE and returned.  */

static int
distance_non_agu_define_in_bb (unsigned int regno1, unsigned int regno2,
			       rtx insn, int distance,
			       rtx start, bool *found)
{
  basic_block bb = start ? BLOCK_FOR_INSN (start) : NULL;
  rtx prev = start;
  rtx next = NULL;

  *found = false;

  while (prev
	 && prev != insn
	 && distance < LEA_SEARCH_THRESHOLD)
    {
      if (NONDEBUG_INSN_P (prev) && NONJUMP_INSN_P (prev))
	{
	  distance = increase_distance (prev, next, distance);
	  if (insn_defines_reg (regno1, regno2, prev))
	    {
	      if (recog_memoized (prev) < 0
		  || get_attr_type (prev) != TYPE_LEA)
		{
		  *found = true;
		  return distance;
		}
	    }

	  next = prev;
	}
      if (prev == BB_HEAD (bb))
	break;

      prev = PREV_INSN (prev);
    }

  return distance;
}

/* Search backward for non-agu definition of register number REGNO1
   or register number REGNO2 in INSN's basic block until
   1. Pass LEA_SEARCH_THRESHOLD instructions, or
   2. Reach neighbour BBs boundary, or
   3. Reach agu definition.
   Returns the distance between the non-agu definition point and INSN.
   If no definition point, returns -1.  */

static int
distance_non_agu_define (unsigned int regno1, unsigned int regno2,
			 rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  int distance = 0;
  bool found = false;

  if (insn != BB_HEAD (bb))
    distance = distance_non_agu_define_in_bb (regno1, regno2, insn,
					      distance, PREV_INSN (insn),
					      &found);

  if (!found && distance < LEA_SEARCH_THRESHOLD)
    {
      edge e;
      edge_iterator ei;
      bool simple_loop = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
	if (e->src == bb)
	  {
	    simple_loop = true;
	    break;
	  }

      if (simple_loop)
	distance = distance_non_agu_define_in_bb (regno1, regno2,
						  insn, distance,
						  BB_END (bb), &found);
      else
	{
	  int shortest_dist = -1;
	  bool found_in_bb = false;

	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int bb_dist
		= distance_non_agu_define_in_bb (regno1, regno2,
						 insn, distance,
						 BB_END (e->src),
						 &found_in_bb);
	      if (found_in_bb)
		{
		  if (shortest_dist < 0)
		    shortest_dist = bb_dist;
		  else if (bb_dist > 0)
		    shortest_dist = MIN (bb_dist, shortest_dist);

		  found = true;
		}
	    }

	  distance = shortest_dist;
	}
    }

  /* get_attr_type may modify recog data.  We want to make sure
     that recog data is valid for instruction INSN, on which
     distance_non_agu_define is called.  INSN is unchanged here.  */
  extract_insn_cached (insn);

  if (!found)
    return -1;

  return distance >> 1;
}

/* Return the distance in half-cycles between INSN and the next
   insn that uses register number REGNO in memory address added
   to DISTANCE.  Return -1 if REGNO0 is set.

   Put true value into *FOUND if register usage was found and
   false otherwise.
   Put true value into *REDEFINED if register redefinition was
   found and false otherwise.  */

static int
distance_agu_use_in_bb (unsigned int regno,
			rtx insn, int distance, rtx start,
			bool *found, bool *redefined)
{
  basic_block bb = start ? BLOCK_FOR_INSN (start) : NULL;
  rtx next = start;
  rtx prev = NULL;

  *found = false;
  *redefined = false;

  while (next
	 && next != insn
	 && distance < LEA_SEARCH_THRESHOLD)
    {
      if (NONDEBUG_INSN_P (next) && NONJUMP_INSN_P (next))
	{
	  distance = increase_distance (prev, next, distance);
	  if (insn_uses_reg_mem (regno, next))
	    {
	      /* Return DISTANCE if OP0 is used in memory
		 address in NEXT.  */
	      *found = true;
	      return distance;
	    }

	  if (insn_defines_reg (regno, INVALID_REGNUM, next))
	    {
	      /* Return -1 if OP0 is set in NEXT.  */
	      *redefined = true;
	      return -1;
	    }

	  prev = next;
	}

      if (next == BB_END (bb))
	break;

      next = NEXT_INSN (next);
    }

  return distance;
}

/* Return the distance between INSN and the next insn that uses
   register number REGNO0 in memory address.  Return -1 if no such
   use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set.  */

static int
distance_agu_use (unsigned int regno0, rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  int distance = 0;
  bool found = false;
  bool redefined = false;

  if (insn != BB_END (bb))
    distance = distance_agu_use_in_bb (regno0, insn, distance,
				       NEXT_INSN (insn),
				       &found, &redefined);

  if (!found && !redefined && distance < LEA_SEARCH_THRESHOLD)
    {
      edge e;
      edge_iterator ei;
      bool simple_loop = false;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest == bb)
	  {
	    simple_loop = true;
	    break;
	  }

      if (simple_loop)
	distance = distance_agu_use_in_bb (regno0, insn,
					   distance, BB_HEAD (bb),
					   &found, &redefined);
      else
	{
	  int shortest_dist = -1;
	  bool found_in_bb = false;
	  bool redefined_in_bb = false;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      int bb_dist
		= distance_agu_use_in_bb (regno0, insn,
					  distance, BB_HEAD (e->dest),
					  &found_in_bb, &redefined_in_bb);
	      if (found_in_bb)
		{
		  if (shortest_dist < 0)
		    shortest_dist = bb_dist;
		  else if (bb_dist > 0)
		    shortest_dist = MIN (bb_dist, shortest_dist);

		  found = true;
		}
	    }

	  distance = shortest_dist;
	}
    }

  if (!found || redefined)
    return -1;

  return distance >> 1;
}

/* Define this macro to tune LEA priority vs ADD; it takes effect when
   there is a dilemma of choosing LEA or ADD
   Negative value: ADD is more preferred than LEA
   Zero: Neutral
   Positive value: LEA is more preferred than ADD.  */
#define IX86_LEA_PRIORITY 0

/* Return true if usage of lea INSN has performance advantage
   over a sequence of instructions.  Instructions sequence has
   SPLIT_COST cycles higher latency than lea latency.  */

static bool
ix86_lea_outperforms (rtx insn, unsigned int regno0, unsigned int regno1,
		      unsigned int regno2, int split_cost)
{
  int dist_define, dist_use;

  dist_define = distance_non_agu_define (regno1, regno2, insn);
  dist_use = distance_agu_use (regno0, insn);

  if (dist_define < 0 || dist_define >= LEA_MAX_STALL)
    {
      /* If there is no non AGU operand definition, no AGU
	 operand usage and split cost is 0 then both lea
	 and non lea variants have same priority.  Currently
	 we prefer lea for 64 bit code and non lea on 32 bit
	 code.  */
      if (dist_use < 0 && split_cost == 0)
	return TARGET_64BIT || IX86_LEA_PRIORITY;
      else
	return true;
    }

  /* With longer definitions distance lea is more preferable.
     Here we change it to take into account splitting cost and
     lea priority.  */
  dist_define += split_cost + IX86_LEA_PRIORITY;

  /* If there is no use in memory address then we just check
     that split cost exceeds AGU stall.  */
  if (dist_use < 0)
    return dist_define > LEA_MAX_STALL;

  /* If this insn has both backward non-agu dependence and forward
     agu dependence, the one with short distance takes effect.  */
  return dist_define >= dist_use;
}
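
/* Worked example of the heuristic above (illustrative, not from the
   original source): if an address register is produced on a non-AGU
   unit one half-cycle pair back (dist_define = 1) and the address is
   consumed by a memory operand two ahead (dist_use = 2), then with
   split_cost = 0 we get dist_define (1) < dist_use (2), so the lea
   does not win and the caller splits it: the backward stall is closer
   than the forward AGU benefit.  */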

/* Return true if it is legal to clobber flags by INSN and
   false otherwise.  */

static bool
ix86_ok_to_clobber_flags (rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  df_ref *use;
  bitmap live;

  while (insn)
    {
      if (NONDEBUG_INSN_P (insn))
	{
	  for (use = DF_INSN_USES (insn); *use; use++)
	    if (DF_REF_REG_USE_P (*use) && DF_REF_REGNO (*use) == FLAGS_REG)
	      return false;

	  if (insn_defines_reg (FLAGS_REG, INVALID_REGNUM, insn))
	    return true;
	}

      if (insn == BB_END (bb))
	break;

      insn = NEXT_INSN (insn);
    }

  live = df_get_live_out (bb);
  return !REGNO_REG_SET_P (live, FLAGS_REG);
}

/* Return true if we need to split op0 = op1 + op2 into a sequence of
   move and add to avoid AGU stalls.  */

bool
ix86_avoid_lea_for_add (rtx insn, rtx operands[])
{
  unsigned int regno0, regno1, regno2;

  /* Check if we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  /* Check it is correct to split here.  */
  if (!ix86_ok_to_clobber_flags (insn))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = true_regnum (operands[1]);
  regno2 = true_regnum (operands[2]);

  /* We need to split only adds with non destructive
     destination operand.  */
  if (regno0 == regno1 || regno0 == regno2)
    return false;
  else
    return !ix86_lea_outperforms (insn, regno0, regno1, regno2, 1);
}

/* Return true if we should emit lea instruction instead of mov
   instruction.  */

bool
ix86_use_lea_for_mov (rtx insn, rtx operands[])
{
  unsigned int regno0, regno1;

  /* Check if we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  /* Use lea for reg to reg moves only.  */
  if (!REG_P (operands[0]) || !REG_P (operands[1]))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = true_regnum (operands[1]);

  return ix86_lea_outperforms (insn, regno0, regno1, INVALID_REGNUM, 0);
}

/* Return true if we need to split lea into a sequence of
   instructions to avoid AGU stalls.  */

bool
ix86_avoid_lea_for_addr (rtx insn, rtx operands[])
{
  unsigned int regno0, regno1, regno2;
  int split_cost;
  struct ix86_address parts;
  int ok;

  /* Check we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  /* Check it is correct to split here.  */
  if (!ix86_ok_to_clobber_flags (insn))
    return false;

  ok = ix86_decompose_address (operands[1], &parts);
  gcc_assert (ok);

  /* There should be at least two components in the address.  */
  if ((parts.base != NULL_RTX) + (parts.index != NULL_RTX)
      + (parts.disp != NULL_RTX) + (parts.scale > 1) < 2)
    return false;

  /* We should not split into add if non legitimate pic
     operand is used as displacement.  */
  if (parts.disp && flag_pic && !LEGITIMATE_PIC_OPERAND_P (parts.disp))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = INVALID_REGNUM;
  regno2 = INVALID_REGNUM;

  if (parts.base)
    regno1 = true_regnum (parts.base);
  if (parts.index)
    regno2 = true_regnum (parts.index);

  split_cost = 0;

  /* Compute how many cycles we will add to execution time
     if we split the lea into a sequence of instructions.  */
  if (parts.base || parts.index)
    {
      /* Have to use mov instruction if non destructive
	 destination form is used.  */
      if (regno1 != regno0 && regno2 != regno0)
	split_cost += 1;

      /* Have to add index to base if both exist.  */
      if (parts.base && parts.index)
	split_cost += 1;

      /* Have to use shift and adds if scale is 2 or greater.  */
      if (parts.scale > 1)
	{
	  if (regno0 != regno1)
	    split_cost += 1;
	  else if (regno2 == regno0)
	    split_cost += 4;
	  else
	    split_cost += parts.scale;
	}

      /* Have to use add instruction with immediate if
	 disp is non zero.  */
      if (parts.disp && parts.disp != const0_rtx)
	split_cost += 1;
    }

  /* Subtract the price of lea.  */
  split_cost -= 1;

  return !ix86_lea_outperforms (insn, regno0, regno1, regno2, split_cost);
}
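
/* Example of the cost accounting above (illustrative, not from the
   original source): for lea 0x4(%ebx,%ecx,2), %eax the split needs a
   mov for the new destination (+1), an add of index to base (+1), a
   shift for scale 2 since regno0 != regno1 (+1) and an add of the
   displacement (+1); minus the lea itself (-1), split_cost = 3.  */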

/* Emit x86 binary operand CODE in mode MODE, where the first operand
   matches destination.  RTX includes clobber of FLAGS_REG.  */

static void
ix86_emit_binop (enum rtx_code code, enum machine_mode mode,
		 rtx dst, rtx src)
{
  rtx op, clob;

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, dst, src));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
}

/* Return true if regno1 def is nearest to the insn.  */

static bool
find_nearest_reg_def (rtx insn, int regno1, int regno2)
{
  rtx prev = insn;
  rtx start = BB_HEAD (BLOCK_FOR_INSN (insn));

  if (insn == start)
    return false;
  while (prev && prev != start)
    {
      if (!INSN_P (prev) || !NONDEBUG_INSN_P (prev))
	{
	  prev = PREV_INSN (prev);
	  continue;
	}
      if (insn_defines_reg (regno1, INVALID_REGNUM, prev))
	return true;
      else if (insn_defines_reg (regno2, INVALID_REGNUM, prev))
	return false;
      prev = PREV_INSN (prev);
    }

  /* None of the regs is defined in the bb.  */
  return false;
}

/* Split lea instructions into a sequence of instructions
   which are executed on ALU to avoid AGU stalls.
   It is assumed that it is allowed to clobber flags register
   at lea position.  */

void
ix86_split_lea_for_addr (rtx insn, rtx operands[], enum machine_mode mode)
{
  unsigned int regno0, regno1, regno2;
  struct ix86_address parts;
  rtx target, tmp;
  int ok, adds;

  ok = ix86_decompose_address (operands[1], &parts);
  gcc_assert (ok);

  target = gen_lowpart (mode, operands[0]);

  regno0 = true_regnum (target);
  regno1 = INVALID_REGNUM;
  regno2 = INVALID_REGNUM;

  if (parts.base)
    {
      parts.base = gen_lowpart (mode, parts.base);
      regno1 = true_regnum (parts.base);
    }

  if (parts.index)
    {
      parts.index = gen_lowpart (mode, parts.index);
      regno2 = true_regnum (parts.index);
    }

  if (parts.disp)
    parts.disp = gen_lowpart (mode, parts.disp);

  if (parts.scale > 1)
    {
      /* Case r1 = r1 + ...  */
      if (regno1 == regno0)
	{
	  /* If we have a case r1 = r1 + C * r1 then we
	     should use multiplication which is very
	     expensive.  Assume cost model is wrong if we
	     have such case here.  */
	  gcc_assert (regno2 != regno0);

	  for (adds = parts.scale; adds > 0; adds--)
	    ix86_emit_binop (PLUS, mode, target, parts.index);
	}
      else
	{
	  /* r1 = r2 + r3 * C case.  Need to move r3 into r1.  */
	  if (regno0 != regno2)
	    emit_insn (gen_rtx_SET (VOIDmode, target, parts.index));

	  /* Use shift for scaling.  */
	  ix86_emit_binop (ASHIFT, mode, target,
			   GEN_INT (exact_log2 (parts.scale)));

	  if (parts.base)
	    ix86_emit_binop (PLUS, mode, target, parts.base);

	  if (parts.disp && parts.disp != const0_rtx)
	    ix86_emit_binop (PLUS, mode, target, parts.disp);
	}
    }
  else if (!parts.base && !parts.index)
    {
      gcc_assert (parts.disp);
      emit_insn (gen_rtx_SET (VOIDmode, target, parts.disp));
    }
  else
    {
      if (!parts.base)
	{
	  if (regno0 != regno2)
	    emit_insn (gen_rtx_SET (VOIDmode, target, parts.index));
	}
      else if (!parts.index)
	{
	  if (regno0 != regno1)
	    emit_insn (gen_rtx_SET (VOIDmode, target, parts.base));
	}
      else
	{
	  rtx tmp1;

	  if (regno0 == regno1)
	    tmp = parts.index;
	  else if (regno0 == regno2)
	    tmp = parts.base;
	  else
	    {
	      /* Find better operand for SET instruction, depending
		 on which definition is farther from the insn.  */
	      if (find_nearest_reg_def (insn, regno1, regno2))
		tmp = parts.index, tmp1 = parts.base;
	      else
		tmp = parts.base, tmp1 = parts.index;

	      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));

	      if (parts.disp && parts.disp != const0_rtx)
		ix86_emit_binop (PLUS, mode, target, parts.disp);

	      ix86_emit_binop (PLUS, mode, target, tmp1);
	      return;
	    }

	  ix86_emit_binop (PLUS, mode, target, tmp);
	}

      if (parts.disp && parts.disp != const0_rtx)
	ix86_emit_binop (PLUS, mode, target, parts.disp);
    }
}
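
/* Illustrative example of the split above (not from the original
   source): with mode SImode, lea 0x8(%ebx,%ecx,4), %eax decomposes
   into

	mov	%ecx, %eax	; move index into the destination
	shl	$2, %eax	; scale by 4 (exact_log2 (4) == 2)
	add	%ebx, %eax	; add the base
	add	$0x8, %eax	; add the displacement

   every step of which executes on the ALU rather than the AGU.  */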

/* Return true if it is ok to optimize an ADD operation to LEA
   operation to avoid flag register consumption.  For most processors,
   ADD is faster than LEA.  For the processors like ATOM, if the
   destination register of LEA holds an actual address which will be
   used soon, LEA is better and otherwise ADD is better.  */

bool
ix86_lea_for_add_ok (rtx insn, rtx operands[])
{
  unsigned int regno0 = true_regnum (operands[0]);
  unsigned int regno1 = true_regnum (operands[1]);
  unsigned int regno2 = true_regnum (operands[2]);

  /* If a = b + c, (a != b && a != c), must use lea form.  */
  if (regno0 != regno1 && regno0 != regno2)
    return true;

  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  return ix86_lea_outperforms (insn, regno0, regno1, regno2, 0);
}

/* Return true if destination reg of SET_BODY is shift count of
   USE_BODY.  */

static bool
ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
{
  rtx set_dest;
  rtx shift_rtx;
  int i;

  /* Retrieve destination of SET_BODY.  */
  switch (GET_CODE (set_body))
    {
    case SET:
      set_dest = SET_DEST (set_body);
      if (!set_dest || !REG_P (set_dest))
	return false;
      break;
    case PARALLEL:
      for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
	if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
					  use_body))
	  return true;
    default:
      return false;
    }

  /* Retrieve shift count of USE_BODY.  */
  switch (GET_CODE (use_body))
    {
    case SET:
      shift_rtx = XEXP (use_body, 1);
      break;
    case PARALLEL:
      for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
	if (ix86_dep_by_shift_count_body (set_body,
					  XVECEXP (use_body, 0, i)))
	  return true;
    default:
      return false;
    }

  if (shift_rtx
      && (GET_CODE (shift_rtx) == ASHIFT
	  || GET_CODE (shift_rtx) == LSHIFTRT
	  || GET_CODE (shift_rtx) == ASHIFTRT
	  || GET_CODE (shift_rtx) == ROTATE
	  || GET_CODE (shift_rtx) == ROTATERT))
    {
      rtx shift_count = XEXP (shift_rtx, 1);

      /* Return true if shift count is dest of SET_BODY.  */
      if (REG_P (shift_count))
	{
	  /* Add check since it can be invoked before register
	     allocation in pre-reload schedule.  */
	  if (reload_completed
	      && true_regnum (set_dest) == true_regnum (shift_count))
	    return true;
	  else if (REGNO (set_dest) == REGNO (shift_count))
	    return true;
	}
    }

  return false;
}

/* Return true if destination reg of SET_INSN is shift count of
   USE_INSN.  */

bool
ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
{
  return ix86_dep_by_shift_count_body (PATTERN (set_insn),
				       PATTERN (use_insn));
}

/* Return TRUE or FALSE depending on whether the unary operator meets the
   appropriate constraints.  */

bool
ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
			enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx operands[2] ATTRIBUTE_UNUSED)
{
  /* If one of operands is memory, source and destination must match.  */
  if ((MEM_P (operands[0])
       || MEM_P (operands[1]))
      && ! rtx_equal_p (operands[0], operands[1]))
    return false;
  return true;
}

/* Return TRUE if the operands to a vec_interleave_{high,low}v2df
   are ok, keeping in mind the possible movddup alternative.  */

bool
ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
{
  if (MEM_P (operands[0]))
    return rtx_equal_p (operands[0], operands[1 + high]);
  if (MEM_P (operands[1]) && MEM_P (operands[2]))
    return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
  return true;
}

/* Post-reload splitter for converting an SF or DFmode value in an
   SSE register into an unsigned SImode.  */

void
ix86_split_convert_uns_si_sse (rtx operands[])
{
  enum machine_mode vecmode;
  rtx value, large, zero_or_two31, input, two31, x;

  large = operands[1];
  zero_or_two31 = operands[2];
  input = operands[3];
  two31 = operands[4];
  vecmode = GET_MODE (large);
  value = gen_rtx_REG (vecmode, REGNO (operands[0]));

  /* Load up the value into the low element.  We must ensure that the other
     elements are valid floats -- zero is the easiest such value.  */
  if (MEM_P (input))
    {
      if (vecmode == V4SFmode)
	emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
      else
	emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
    }
  else
    {
      input = gen_rtx_REG (vecmode, REGNO (input));
      emit_move_insn (value, CONST0_RTX (vecmode));
      if (vecmode == V4SFmode)
	emit_insn (gen_sse_movss (value, value, input));
      else
	emit_insn (gen_sse2_movsd (value, value, input));
    }

  emit_move_insn (large, two31);
  emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);

  x = gen_rtx_fmt_ee (LE, vecmode, large, value);
  emit_insn (gen_rtx_SET (VOIDmode, large, x));

  x = gen_rtx_AND (vecmode, zero_or_two31, large);
  emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));

  x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
  emit_insn (gen_rtx_SET (VOIDmode, value, x));

  large = gen_rtx_REG (V4SImode, REGNO (large));
  emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));

  x = gen_rtx_REG (V4SImode, REGNO (value));
  if (vecmode == V4SFmode)
    emit_insn (gen_fix_truncv4sfv4si2 (x, value));
  else
    emit_insn (gen_sse2_cvttpd2dq (x, value));
  value = x;

  emit_insn (gen_xorv4si3 (value, value, large));
}
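
/* Worked example of the split above (illustrative, not from the
   original source): converting the double 3e9 (> 2**31) to unsigned
   SImode.  The LE compare yields an all-ones mask since 2**31 <= 3e9,
   so zero_or_two31 becomes 2**31; value becomes 3e9 - 2**31
   = 852516352.0, which the signed cvttpd2dq can represent.  The mask
   shifted left by 31 gives 0x80000000, and the final xor restores the
   subtracted bit: 852516352 ^ 0x80000000 = 3000000000.  */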

/* Convert an unsigned DImode value into a DFmode, using only SSE.
   Expects the 64-bit DImode to be supplied in a pair of integral
   registers.  Requires SSE2; will use SSE3 if available.  For x86_32,
   -mfpmath=sse, !optimize_size only.  */

void
ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
  rtx int_xmm, fp_xmm;
  rtx biases, exponents;
  rtx x;

  int_xmm = gen_reg_rtx (V4SImode);
  if (TARGET_INTER_UNIT_MOVES)
    emit_insn (gen_movdi_to_sse (int_xmm, input));
  else if (TARGET_SSE_SPLIT_REGS)
    {
      emit_clobber (int_xmm);
      emit_move_insn (gen_lowpart (DImode, int_xmm), input);
    }
  else
    {
      x = gen_reg_rtx (V2DImode);
      ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
      emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
    }

  x = gen_rtx_CONST_VECTOR (V4SImode,
			    gen_rtvec (4, GEN_INT (0x43300000UL),
				       GEN_INT (0x45300000UL),
				       const0_rtx, const0_rtx));
  exponents = validize_mem (force_const_mem (V4SImode, x));

  /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
  emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));

  /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
     yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
     Similarly (0x45300000UL ## fp_value_hi_xmm) yields
     (0x1.0p84 + double(fp_value_hi_xmm)).
     Note these exponents differ by 32.  */

  fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));

  /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
     in [0,2**32-1] and [0]+[2**32,2**64-1] respectively.  */
  real_ldexp (&bias_lo_rvt, &dconst1, 52);
  real_ldexp (&bias_hi_rvt, &dconst1, 84);
  biases = const_double_from_real_value (bias_lo_rvt, DFmode);
  x = const_double_from_real_value (bias_hi_rvt, DFmode);
  biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
  biases = validize_mem (force_const_mem (V2DFmode, biases));
  emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));

  /* Add the upper and lower DFmode values together.  */
  if (TARGET_SSE3)
    emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
  else
    {
      x = copy_to_mode_reg (V2DFmode, fp_xmm);
      emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
      emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
    }

  ix86_expand_vector_extract (false, target, fp_xmm, 0);
}
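
/* Worked example of the bias trick above (illustrative, not from the
   original source): for input 0x0000000200000005 the low word 5
   juxtaposed with 0x43300000 reads as the double 0x1.0p52 + 5, and
   the high word 2 with 0x45300000 as 0x1.0p84 + 2*2**32.  After
   subtracting the 0x1.0p52/0x1.0p84 biases the two lanes hold 5.0 and
   8589934592.0, and their sum is the exact result
   2*2**32 + 5 = 8589934597.0.  */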

/* Not used, but eases macroization of patterns.  */
void
ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
				  rtx input ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Convert an unsigned SImode value into a DFmode.  Only currently used
   for SSE, but applicable anywhere.  */

void
ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE TWO31r;
  rtx x, fp;

  x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
			   NULL, 1, OPTAB_DIRECT);

  fp = gen_reg_rtx (DFmode);
  emit_insn (gen_floatsidf2 (fp, x));

  real_ldexp (&TWO31r, &dconst1, 31);
  x = const_double_from_real_value (TWO31r, DFmode);

  x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
  if (x != target)
    emit_move_insn (target, x);
}
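
/* Illustrative note on the arithmetic above (not from the original
   source): adding INT_MIN wraps the unsigned input x into the signed
   value x - 2**31, which floatsidf2 converts exactly; adding 2**31
   back as a double restores x, e.g. x = 3000000000 -> 852516352
   -> 852516352.0 + 2147483648.0 = 3000000000.0.  */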

/* Convert a signed DImode value into a DFmode.  Only used for SSE in
   32-bit mode; otherwise we have a direct convert instruction.  */

void
ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE TWO32r;
  rtx fp_lo, fp_hi, x;

  fp_lo = gen_reg_rtx (DFmode);
  fp_hi = gen_reg_rtx (DFmode);

  emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));

  real_ldexp (&TWO32r, &dconst1, 32);
  x = const_double_from_real_value (TWO32r, DFmode);
  fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);

  ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));

  x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
			   0, OPTAB_DIRECT);
  if (x != target)
    emit_move_insn (target, x);
}

/* Convert an unsigned SImode value into a SFmode, using only SSE.
   For x86_32, -mfpmath=sse, !optimize_size only.  */
void
ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE ONE16r;
  rtx fp_hi, fp_lo, int_hi, int_lo, x;

  real_ldexp (&ONE16r, &dconst1, 16);
  x = const_double_from_real_value (ONE16r, SFmode);
  int_lo = expand_simple_binop (SImode, AND, input, GEN_INT (0xffff),
				NULL, 0, OPTAB_DIRECT);
  int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT (16),
				NULL, 0, OPTAB_DIRECT);
  fp_hi = gen_reg_rtx (SFmode);
  fp_lo = gen_reg_rtx (SFmode);
  emit_insn (gen_floatsisf2 (fp_hi, int_hi));
  emit_insn (gen_floatsisf2 (fp_lo, int_lo));
  fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
			       0, OPTAB_DIRECT);
  fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
			       0, OPTAB_DIRECT);
  if (!rtx_equal_p (target, fp_hi))
    emit_move_insn (target, fp_hi);
}
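
/* Worked example (illustrative, not from the original source):
   x = 0x12345678 splits into int_lo = 0x5678 and int_hi = 0x1234;
   both fit in the 24-bit SFmode significand, so the conversions and
   the multiply by 65536.0 are exact and the result
   0x1234 * 65536.0 + 0x5678 rounds only once, in the final add.  */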

/* floatunsv{4,8}siv{4,8}sf2 expander.  Expand code to convert
   a vector of unsigned ints VAL to vector of floats TARGET.  */

void
ix86_expand_vector_convert_uns_vsivsf (rtx target, rtx val)
{
  rtx tmp[8];
  REAL_VALUE_TYPE TWO16r;
  enum machine_mode intmode = GET_MODE (val);
  enum machine_mode fltmode = GET_MODE (target);
  rtx (*cvt) (rtx, rtx);

  if (intmode == V4SImode)
    cvt = gen_floatv4siv4sf2;
  else
    cvt = gen_floatv8siv8sf2;
  tmp[0] = ix86_build_const_vector (intmode, 1, GEN_INT (0xffff));
  tmp[0] = force_reg (intmode, tmp[0]);
  tmp[1] = expand_simple_binop (intmode, AND, val, tmp[0], NULL_RTX, 1,
				OPTAB_DIRECT);
  tmp[2] = expand_simple_binop (intmode, LSHIFTRT, val, GEN_INT (16),
				NULL_RTX, 1, OPTAB_DIRECT);
  tmp[3] = gen_reg_rtx (fltmode);
  emit_insn (cvt (tmp[3], tmp[1]));
  tmp[4] = gen_reg_rtx (fltmode);
  emit_insn (cvt (tmp[4], tmp[2]));
  real_ldexp (&TWO16r, &dconst1, 16);
  tmp[5] = const_double_from_real_value (TWO16r, SFmode);
  tmp[5] = force_reg (fltmode, ix86_build_const_vector (fltmode, 1, tmp[5]));
  tmp[6] = expand_simple_binop (fltmode, MULT, tmp[4], tmp[5], NULL_RTX, 1,
				OPTAB_DIRECT);
  tmp[7] = expand_simple_binop (fltmode, PLUS, tmp[3], tmp[6], target, 1,
				OPTAB_DIRECT);
  if (tmp[7] != target)
    emit_move_insn (target, tmp[7]);
}

/* Adjust a V*SFmode/V*DFmode value VAL so that *sfix_trunc* resp. fix_trunc*
   pattern can be used on it instead of *ufix_trunc* resp. fixuns_trunc*.
   This is done by doing just signed conversion if < 0x1p31, and otherwise by
   subtracting 0x1p31 first and xoring in 0x80000000 from *XORP afterwards.  */

rtx
ix86_expand_adjust_ufix_to_sfix_si (rtx val, rtx *xorp)
{
  REAL_VALUE_TYPE TWO31r;
  rtx two31r, tmp[4];
  enum machine_mode mode = GET_MODE (val);
  enum machine_mode scalarmode = GET_MODE_INNER (mode);
  enum machine_mode intmode = GET_MODE_SIZE (mode) == 32 ? V8SImode : V4SImode;
  rtx (*cmp) (rtx, rtx, rtx, rtx);
  int i;

  for (i = 0; i < 3; i++)
    tmp[i] = gen_reg_rtx (mode);
  real_ldexp (&TWO31r, &dconst1, 31);
  two31r = const_double_from_real_value (TWO31r, scalarmode);
  two31r = ix86_build_const_vector (mode, 1, two31r);
  two31r = force_reg (mode, two31r);
  switch (mode)
    {
    case V8SFmode: cmp = gen_avx_maskcmpv8sf3; break;
    case V4SFmode: cmp = gen_sse_maskcmpv4sf3; break;
    case V4DFmode: cmp = gen_avx_maskcmpv4df3; break;
    case V2DFmode: cmp = gen_sse2_maskcmpv2df3; break;
    default: gcc_unreachable ();
    }
  tmp[3] = gen_rtx_LE (mode, two31r, val);
  emit_insn (cmp (tmp[0], two31r, val, tmp[3]));
  tmp[1] = expand_simple_binop (mode, AND, tmp[0], two31r, tmp[1],
				0, OPTAB_DIRECT);
  if (intmode == V4SImode || TARGET_AVX2)
    *xorp = expand_simple_binop (intmode, ASHIFT,
				 gen_lowpart (intmode, tmp[0]),
				 GEN_INT (31), NULL_RTX, 0,
				 OPTAB_DIRECT);
  else
    {
      rtx two31 = GEN_INT ((unsigned HOST_WIDE_INT) 1 << 31);
      two31 = ix86_build_const_vector (intmode, 1, two31);
      *xorp = expand_simple_binop (intmode, AND,
				   gen_lowpart (intmode, tmp[0]),
				   two31, NULL_RTX, 0,
				   OPTAB_DIRECT);
    }
  return expand_simple_binop (mode, MINUS, val, tmp[1], tmp[2],
			      0, OPTAB_DIRECT);
}

/* A subroutine of ix86_build_signbit_mask.  If VECT is true,
   then replicate the value for all elements of the vector
   register.  */

rtx
ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
{
  int i, n_elt;
  rtvec v;
  enum machine_mode scalar_mode;

  switch (mode)
    {
    case V32QImode:
    case V16QImode:
    case V16HImode:
    case V8HImode:
    case V8SImode:
    case V4SImode:
    case V4DImode:
    case V2DImode:
      gcc_assert (vect);
    case V8SFmode:
    case V4SFmode:
    case V4DFmode:
    case V2DFmode:
      n_elt = GET_MODE_NUNITS (mode);
      v = rtvec_alloc (n_elt);
      scalar_mode = GET_MODE_INNER (mode);

      RTVEC_ELT (v, 0) = value;

      for (i = 1; i < n_elt; ++i)
	RTVEC_ELT (v, i) = vect ? value : CONST0_RTX (scalar_mode);

      return gen_rtx_CONST_VECTOR (mode, v);

    default:
      gcc_unreachable ();
    }
}

/* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
   and ix86_expand_int_vcond.  Create a mask for the sign bit in MODE
   for an SSE register.  If VECT is true, then replicate the mask for
   all elements of the vector register.  If INVERT is true, then create
   a mask excluding the sign bit.  */

rtx
ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
{
  enum machine_mode vec_mode, imode;
  HOST_WIDE_INT hi, lo;
  int shift = 63;
  rtx v;
  rtx mask;

  /* Find the sign bit, sign extended to 2*HWI.  */
  switch (mode)
    {
    case V8SImode:
    case V4SImode:
    case V8SFmode:
    case V4SFmode:
      vec_mode = mode;
      mode = GET_MODE_INNER (mode);
      imode = SImode;
      lo = 0x80000000, hi = lo < 0;
      break;

    case V4DImode:
    case V2DImode:
    case V4DFmode:
    case V2DFmode:
      vec_mode = mode;
      mode = GET_MODE_INNER (mode);
      imode = DImode;
      if (HOST_BITS_PER_WIDE_INT >= 64)
	lo = (HOST_WIDE_INT) 1 << shift, hi = -1;
      else
	lo = 0, hi = (HOST_WIDE_INT) 1 << (shift - HOST_BITS_PER_WIDE_INT);
      break;

    case TImode:
    case TFmode:
      vec_mode = VOIDmode;
      if (HOST_BITS_PER_WIDE_INT >= 64)
	{
	  imode = TImode;
	  lo = 0, hi = (HOST_WIDE_INT) 1 << shift;
	}
      else
	{
	  rtvec vec;

	  imode = DImode;
	  lo = 0, hi = (HOST_WIDE_INT) 1 << (shift - HOST_BITS_PER_WIDE_INT);

	  if (invert)
	    {
	      lo = ~lo, hi = ~hi;
	      v = constm1_rtx;
	    }
	  else
	    v = const0_rtx;

	  mask = immed_double_const (lo, hi, imode);

	  vec = gen_rtvec (2, v, mask);
	  v = gen_rtx_CONST_VECTOR (V2DImode, vec);
	  v = copy_to_mode_reg (mode, gen_lowpart (mode, v));

	  return v;
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (invert)
    lo = ~lo, hi = ~hi;

  /* Force this value into the low part of a fp vector constant.  */
  mask = immed_double_const (lo, hi, imode);
  mask = gen_lowpart (mode, mask);

  if (vec_mode == VOIDmode)
    return force_reg (mode, mask);

  v = ix86_build_const_vector (vec_mode, vect, mask);
  return force_reg (vec_mode, v);
}
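
/* Illustrative values for the mask built above (not from the original
   source): for SFmode this yields the bit pattern 0x80000000 (or
   0x7fffffff when INVERT), and for DFmode 0x8000000000000000 (or
   0x7fffffffffffffff); abs is then an AND with the inverted mask and
   neg an XOR with the plain one.  */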

/* Generate code for floating point ABS or NEG.  */

void
ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
				rtx operands[])
{
  rtx mask, set, dst, src;
  bool use_sse = false;
  bool vector_mode = VECTOR_MODE_P (mode);
  enum machine_mode vmode = mode;

  if (vector_mode)
    use_sse = true;
  else if (mode == TFmode)
    use_sse = true;
  else if (TARGET_SSE_MATH)
    {
      use_sse = SSE_FLOAT_MODE_P (mode);
      if (mode == SFmode)
	vmode = V4SFmode;
      else if (mode == DFmode)
	vmode = V2DFmode;
    }

  /* NEG and ABS performed with SSE use bitwise mask operations.
     Create the appropriate mask now.  */
  if (use_sse)
    mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
  else
    mask = NULL_RTX;

  dst = operands[0];
  src = operands[1];

  set = gen_rtx_fmt_e (code, mode, src);
  set = gen_rtx_SET (VOIDmode, dst, set);

  if (mask)
    {
      rtx use, clob;
      rtvec par;

      use = gen_rtx_USE (VOIDmode, mask);
      if (vector_mode)
	par = gen_rtvec (2, set, use);
      else
	{
	  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
	  par = gen_rtvec (3, set, use, clob);
	}
      emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
    }
  else
    emit_insn (set);
}

/* Expand a copysign operation.  Special case operand 0 being a constant.  */

void
ix86_expand_copysign (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, op0, op1, mask, nmask;

  dest = operands[0];
  op0 = operands[1];
  op1 = operands[2];

  mode = GET_MODE (dest);

  if (mode == SFmode)
    vmode = V4SFmode;
  else if (mode == DFmode)
    vmode = V2DFmode;
  else
    vmode = mode;

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx (*copysign_insn) (rtx, rtx, rtx, rtx);

      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);

      if (mode == SFmode || mode == DFmode)
	{
	  if (op0 == CONST0_RTX (mode))
	    op0 = CONST0_RTX (vmode);
	  else
	    {
	      rtx v = ix86_build_const_vector (vmode, false, op0);

	      op0 = force_reg (vmode, v);
	    }
	}
      else if (op0 != CONST0_RTX (mode))
	op0 = force_reg (mode, op0);

      mask = ix86_build_signbit_mask (vmode, 0, 0);

      if (mode == SFmode)
	copysign_insn = gen_copysignsf3_const;
      else if (mode == DFmode)
	copysign_insn = gen_copysigndf3_const;
      else
	copysign_insn = gen_copysigntf3_const;

      emit_insn (copysign_insn (dest, op0, op1, mask));
    }
  else
    {
      rtx (*copysign_insn) (rtx, rtx, rtx, rtx, rtx, rtx);

      nmask = ix86_build_signbit_mask (vmode, 0, 1);
      mask = ix86_build_signbit_mask (vmode, 0, 0);

      if (mode == SFmode)
	copysign_insn = gen_copysignsf3_var;
      else if (mode == DFmode)
	copysign_insn = gen_copysigndf3_var;
      else
	copysign_insn = gen_copysigntf3_var;

      emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
    }
}

/* Deconstruct a copysign operation into bit masks.  Operand 0 is known to
   be a constant, and so has already been expanded into a vector constant.  */

void
ix86_split_copysign_const (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, op0, mask, x;

  dest = operands[0];
  op0 = operands[1];
  mask = operands[3];

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  dest = simplify_gen_subreg (vmode, dest, mode, 0);
  x = gen_rtx_AND (vmode, dest, mask);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  if (op0 != CONST0_RTX (vmode))
    {
      x = gen_rtx_IOR (vmode, dest, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
}

/* Deconstruct a copysign operation into bit masks.  Operand 0 is variable,
   so we have to do two masks.  */

void
ix86_split_copysign_var (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, scratch, op0, op1, mask, nmask, x;

  dest = operands[0];
  scratch = operands[1];
  op0 = operands[2];
  op1 = operands[3];
  nmask = operands[4];
  mask = operands[5];

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  if (rtx_equal_p (op0, op1))
    {
      /* Shouldn't happen often (it's useless, obviously), but when it does
	 we'd generate incorrect code if we continue below.  */
      emit_move_insn (dest, op0);
      return;
    }

  if (REG_P (mask) && REGNO (dest) == REGNO (mask))	/* alternative 0 */
    {
      gcc_assert (REGNO (op1) == REGNO (scratch));

      x = gen_rtx_AND (vmode, scratch, mask);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      dest = mask;
      op0 = simplify_gen_subreg (vmode, op0, mode, 0);
      x = gen_rtx_NOT (vmode, dest);
      x = gen_rtx_AND (vmode, x, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else
    {
      if (REGNO (op1) == REGNO (scratch))		/* alternative 1,3 */
	{
	  x = gen_rtx_AND (vmode, scratch, mask);
	}
      else						/* alternative 2,4 */
	{
	  gcc_assert (REGNO (mask) == REGNO (scratch));
	  op1 = simplify_gen_subreg (vmode, op1, mode, 0);
	  x = gen_rtx_AND (vmode, scratch, op1);
	}
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      if (REGNO (op0) == REGNO (dest))			/* alternative 1,2 */
	{
	  dest = simplify_gen_subreg (vmode, op0, mode, 0);
	  x = gen_rtx_AND (vmode, dest, nmask);
	}
      else						/* alternative 3,4 */
	{
	  gcc_assert (REGNO (nmask) == REGNO (dest));
	  dest = nmask;
	  op0 = simplify_gen_subreg (vmode, op0, mode, 0);
	  x = gen_rtx_AND (vmode, dest, op0);
	}
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }

  x = gen_rtx_IOR (vmode, dest, scratch);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
}

/* Return TRUE or FALSE depending on whether the first SET in INSN
   has source and destination with matching CC modes, and that the
   CC mode is at least as constrained as REQ_MODE.  */

bool
ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
{
  rtx set;
  enum machine_mode set_mode;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCNOmode:
      if (req_mode != CCNOmode
	  && (req_mode != CCmode
	      || XEXP (SET_SRC (set), 1) != const0_rtx))
	return false;
      break;
    case CCmode:
      if (req_mode == CCGCmode)
	return false;
      /* FALLTHRU */
    case CCGCmode:
      if (req_mode == CCGOCmode || req_mode == CCNOmode)
	return false;
      /* FALLTHRU */
    case CCGOCmode:
      if (req_mode == CCZmode)
	return false;
      /* FALLTHRU */
    case CCZmode:
      break;

    case CCAmode:
    case CCCmode:
    case CCOmode:
    case CCSmode:
      if (set_mode != req_mode)
	return false;
      break;

    default:
      gcc_unreachable ();
    }

  return GET_MODE (SET_SRC (set)) == set_mode;
}

/* Generate insn patterns to do an integer compare of OPERANDS.  */

static rtx
ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode cmpmode;
  rtx tmp, flags;

  cmpmode = SELECT_CC_MODE (code, op0, op1);
  flags = gen_rtx_REG (cmpmode, FLAGS_REG);

  /* This is very simple, but making the interface the same as in the
     FP case makes the rest of the code easier.  */
  tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
}

/* Figure out whether to use ordered or unordered fp comparisons.
   Return the appropriate mode to use.  */

enum machine_mode
ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
{
  /* ??? In order to make all comparisons reversible, we do all comparisons
     non-trapping when compiling for IEEE.  Once gcc is able to distinguish
     all forms trapping and nontrapping comparisons, we can make inequality
     comparisons trapping again, since it results in better code when using
     FCOM based compares.  */
  return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
}

enum machine_mode
ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
      return ix86_fp_compare_mode (code);
    }

  switch (code)
    {
      /* Only zero flag is needed.  */
    case EQ:			/* ZF=0 */
    case NE:			/* ZF!=0 */
      return CCZmode;
      /* Codes needing carry flag.  */
    case GEU:			/* CF=0 */
    case LTU:			/* CF=1 */
      /* Detect overflow checks.  They need just the carry flag.  */
      if (GET_CODE (op0) == PLUS
	  && rtx_equal_p (op1, XEXP (op0, 0)))
	return CCCmode;
      else
	return CCmode;
    case GTU:			/* CF=0 & ZF=0 */
    case LEU:			/* CF=1 | ZF=1 */
      /* Detect overflow checks.  They need just the carry flag.  */
      if (GET_CODE (op0) == MINUS
	  && rtx_equal_p (op1, XEXP (op0, 0)))
	return CCCmode;
      else
	return CCmode;
      /* Codes possibly doable only with sign flag when
	 comparing against zero.  */
    case GE:			/* SF=OF   or   SF=0 */
    case LT:			/* SF<>OF  or   SF=1 */
      if (op1 == const0_rtx)
	return CCGOCmode;
      else
	/* For other cases Carry flag is not required.  */
	return CCGCmode;
      /* Codes doable only with sign flag when comparing
	 against zero, but we miss jump instruction for it
	 so we need to use relational tests against overflow
	 that thus needs to be zero.  */
    case GT:			/* ZF=0 & SF=OF */
    case LE:			/* ZF=1 | SF<>OF */
      if (op1 == const0_rtx)
	return CCNOmode;
      else
	return CCGCmode;
      /* strcmp pattern do (use flags) and combine may ask us for proper
	 mode.  */
    case USE:
      return CCmode;
    default:
      gcc_unreachable ();
    }
}

/* Return the fixed registers used for condition codes.  */

static bool
ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLAGS_REG;
  *p2 = FPSR_REG;
  return true;
}

/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static enum machine_mode
ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
{
  if (m1 == m2)
    return m1;

  if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
    return VOIDmode;

  if ((m1 == CCGCmode && m2 == CCGOCmode)
      || (m1 == CCGOCmode && m2 == CCGCmode))
    return CCGCmode;

  if (m1 == CCZmode && (m2 == CCGCmode || m2 == CCGOCmode))
    return m2;
  else if (m2 == CCZmode && (m1 == CCGCmode || m1 == CCGOCmode))
    return m1;

  switch (m1)
    {
    default:
      gcc_unreachable ();

    case CCmode:
    case CCGCmode:
    case CCGOCmode:
    case CCNOmode:
    case CCAmode:
    case CCCmode:
    case CCOmode:
    case CCSmode:
    case CCZmode:
      switch (m2)
	{
	default:
	  return VOIDmode;

	case CCmode:
	case CCGCmode:
	case CCGOCmode:
	case CCNOmode:
	case CCAmode:
	case CCCmode:
	case CCOmode:
	case CCSmode:
	case CCZmode:
	  return CCmode;
	}

    case CCFPmode:
    case CCFPUmode:
      /* These are only compatible with themselves, which we already
	 checked above.  */
      return VOIDmode;
    }
}

/* Return a comparison we can do and that it is equivalent to
   swap_condition (code) apart possibly from orderedness.
   But, never change orderedness if TARGET_IEEE_FP, returning
   UNKNOWN in that case if necessary.  */

static enum rtx_code
ix86_fp_swap_condition (enum rtx_code code)
{
  switch (code)
    {
    case GT:			/* GTU - CF=0 & ZF=0 */
      return TARGET_IEEE_FP ? UNKNOWN : UNLT;
    case GE:			/* GEU - CF=0 */
      return TARGET_IEEE_FP ? UNKNOWN : UNLE;
    case UNLT:			/* LTU - CF=1 */
      return TARGET_IEEE_FP ? UNKNOWN : GT;
    case UNLE:			/* LEU - CF=1 | ZF=1 */
      return TARGET_IEEE_FP ? UNKNOWN : GE;
    default:
      return swap_condition (code);
    }
}

/* Return cost of comparison CODE using the best strategy for performance.
   All following functions do use number of instructions as a cost metrics.
   In future this should be tweaked to compute bytes for optimize_size and
   take into account performance of various instructions on various CPUs.  */

static int
ix86_fp_comparison_cost (enum rtx_code code)
{
  int arith_cost;

  /* The cost of code using bit-twiddling on %ah.  */
  switch (code)
    {
    case UNLE:
    case UNLT:
    case LTGT:
    case GT:
    case GE:
    case UNORDERED:
    case ORDERED:
    case UNEQ:
      arith_cost = 4;
      break;
    case LT:
    case NE:
    case EQ:
    case UNGE:
      arith_cost = TARGET_IEEE_FP ? 5 : 4;
      break;
    case LE:
    case UNGT:
      arith_cost = TARGET_IEEE_FP ? 6 : 4;
      break;
    default:
      gcc_unreachable ();
    }

  switch (ix86_fp_comparison_strategy (code))
    {
    case IX86_FPCMP_COMI:
      return arith_cost > 4 ? 3 : 2;
    case IX86_FPCMP_SAHF:
      return arith_cost > 4 ? 4 : 3;
    default:
      return arith_cost;
    }
}

/* Return strategy to use for floating-point.  We assume that fcomi is always
   preferable where available, since that is also true when looking at size
   (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test).  */

enum ix86_fpcmp_strategy
ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
{
  /* Do fcomi/sahf based test when profitable.  */

  if (TARGET_CMOVE)
    return IX86_FPCMP_COMI;

  if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
    return IX86_FPCMP_SAHF;

  return IX86_FPCMP_ARITH;
}

/* Swap, force into registers, or otherwise massage the two operands
   to a fp comparison.  The operands are updated in place; the new
   comparison code is returned.  */

static enum rtx_code
ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
  rtx op0 = *pop0, op1 = *pop1;
  enum machine_mode op_mode = GET_MODE (op0);
  int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);

  /* All of the unordered compare instructions only work on registers.
     The same is true of the fcomi compare instructions.  The XFmode
     compare instructions require registers except when comparing
     against zero or when converting operand 1 from fixed point to
     floating point.  */

  if (!is_sse
      && (fpcmp_mode == CCFPUmode
	  || (op_mode == XFmode
	      && ! (standard_80387_constant_p (op0) == 1
		    || standard_80387_constant_p (op1) == 1)
	      && GET_CODE (op1) != FLOAT)
	  || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
    {
      op0 = force_reg (op_mode, op0);
      op1 = force_reg (op_mode, op1);
    }
  else
    {
      /* %%% We only allow op1 in memory; op0 must be st(0).  So swap
	 things around if they appear profitable, otherwise force op0
	 into a register.  */

      if (standard_80387_constant_p (op0) == 0
	  || (MEM_P (op0)
	      && ! (standard_80387_constant_p (op1) == 0
		    || MEM_P (op1))))
	{
	  enum rtx_code new_code = ix86_fp_swap_condition (code);
	  if (new_code != UNKNOWN)
	    {
	      rtx tmp;
	      tmp = op0, op0 = op1, op1 = tmp;
	      code = new_code;
	    }
	}

      if (!REG_P (op0))
	op0 = force_reg (op_mode, op0);

      if (CONSTANT_P (op1))
	{
	  int tmp = standard_80387_constant_p (op1);
	  if (tmp == 0)
	    op1 = validize_mem (force_const_mem (op_mode, op1));
	  else if (tmp == 1)
	    {
	      if (TARGET_CMOVE)
		op1 = force_reg (op_mode, op1);
	    }
	  else
	    op1 = force_reg (op_mode, op1);
	}
    }

  /* Try to rearrange the comparison to make it cheaper.  */
  if (ix86_fp_comparison_cost (code)
      > ix86_fp_comparison_cost (swap_condition (code))
      && (REG_P (op1) || can_create_pseudo_p ()))
    {
      rtx tmp;
      tmp = op0, op0 = op1, op1 = tmp;
      code = swap_condition (code);
      if (!REG_P (op0))
	op0 = force_reg (op_mode, op0);
    }

  *pop0 = op0;
  *pop1 = op1;
  return code;
}

/* Convert comparison codes we use to represent FP comparison to integer
   code that will result in proper branch.  Return UNKNOWN if no such code
   is available.  */

enum rtx_code
ix86_fp_compare_code_to_integer (enum rtx_code code)
{
  switch (code)
    {
    case GT:
      return GTU;
    case GE:
      return GEU;
    case ORDERED:
    case UNORDERED:
      return code;
    case UNEQ:
      return EQ;
    case UNLT:
      return LTU;
    case UNLE:
      return LEU;
    case LTGT:
      return NE;
    default:
      return UNKNOWN;
    }
}

/* Generate insn patterns to do a floating point compare of OPERANDS.  */

static rtx
ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
{
  enum machine_mode fpcmp_mode, intcmp_mode;
  rtx tmp, tmp2;

  fpcmp_mode = ix86_fp_compare_mode (code);
  code = ix86_prepare_fp_compare_args (code, &op0, &op1);

  /* Do fcomi/sahf based test when profitable.  */
  switch (ix86_fp_comparison_strategy (code))
    {
    case IX86_FPCMP_COMI:
      intcmp_mode = fpcmp_mode;
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
			 tmp);
      emit_insn (tmp);
      break;

    case IX86_FPCMP_SAHF:
      intcmp_mode = fpcmp_mode;
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
			 tmp);

      if (!scratch)
	scratch = gen_reg_rtx (HImode);
      tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
      break;

    case IX86_FPCMP_ARITH:
      /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first.  */
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
      if (!scratch)
	scratch = gen_reg_rtx (HImode);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));

      /* In the unordered case, we have to check C2 for NaN's, which
	 doesn't happen to work out to anything nice combination-wise.
	 So do some bit twiddling on the value we've got in AH to come
	 up with an appropriate set of condition codes.  */

      intcmp_mode = CCNOmode;
      switch (code)
	{
	case GT:
	case UNGT:
	  if (code == GT || !TARGET_IEEE_FP)
	    {
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
	      code = EQ;
	    }
	  else
	    {
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
	      emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
	      intcmp_mode = CCmode;
	      code = GEU;
	    }
	  break;
	case LT:
	case UNLT:
	  if (code == LT && TARGET_IEEE_FP)
	    {
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
	      intcmp_mode = CCmode;
	      code = EQ;
	    }
	  else
	    {
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
	      code = NE;
	    }
	  break;
	case GE:
	case UNGE:
	  if (code == GE || !TARGET_IEEE_FP)
	    {
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
	      code = EQ;
	    }
	  else
	    {
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
	      code = NE;
	    }
	  break;
	case LE:
	case UNLE:
	  if (code == LE && TARGET_IEEE_FP)
	    {
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
	      emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
	      intcmp_mode = CCmode;
	      code = LTU;
	    }
	  else
	    {
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
	      code = NE;
	    }
	  break;
	case EQ:
	case UNEQ:
	  if (code == EQ && TARGET_IEEE_FP)
	    {
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
	      intcmp_mode = CCmode;
	      code = EQ;
	    }
	  else
	    {
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
	      code = NE;
	    }
	  break;
	case NE:
	case LTGT:
	  if (code == NE && TARGET_IEEE_FP)
	    {
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
					     GEN_INT (0x40)));
	      code = NE;
	    }
	  else
	    {
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
	      code = EQ;
	    }
	  break;

	case UNORDERED:
	  emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
	  code = NE;
	  break;
	case ORDERED:
	  emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
	  code = EQ;
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode,
			 gen_rtx_REG (intcmp_mode, FLAGS_REG),
			 const0_rtx);
}
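
/* Background for the magic constants above: after fnstsw the x87
   status word's condition bits land in %ah as C0 = 0x01, C2 = 0x04
   and C3 = 0x40, so 0x45 tests C0|C2|C3 and 0x44 tests C2|C3.  An
   fcom result of a > b clears all three bits, a < b sets C0,
   equality sets C3, and an unordered result sets C0, C2 and C3.  */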

static rtx
ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx ret;

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
    {
      gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
      ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
    }
  else
    ret = ix86_expand_int_compare (code, op0, op1);

  return ret;
}

void
ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx tmp;

  switch (mode)
    {
    case SFmode:
    case DFmode:
    case XFmode:
    case QImode:
    case HImode:
    case SImode:
      simple:
      tmp = ix86_expand_compare (code, op0, op1);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
				  gen_rtx_LABEL_REF (VOIDmode, label),
				  pc_rtx);
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      return;

    case DImode:
      if (TARGET_64BIT)
	goto simple;
    case TImode:
      /* Expand DImode branch into multiple compare+branch.  */
      {
	rtx lo[2], hi[2], label2;
	enum rtx_code code1, code2, code3;
	enum machine_mode submode;

	if (CONSTANT_P (op0) && !CONSTANT_P (op1))
	  {
	    tmp = op0, op0 = op1, op1 = tmp;
	    code = swap_condition (code);
	  }

	split_double_mode (mode, &op0, 1, lo+0, hi+0);
	split_double_mode (mode, &op1, 1, lo+1, hi+1);

	submode = mode == DImode ? SImode : DImode;

	/* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
	   avoid two branches.  This costs one extra insn, so disable when
	   optimizing for size.  */

	if ((code == EQ || code == NE)
	    && (!optimize_insn_for_size_p ()
		|| hi[1] == const0_rtx || lo[1] == const0_rtx))
	  {
	    rtx xor0, xor1;

	    xor1 = hi[0];
	    if (hi[1] != const0_rtx)
	      xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
				   NULL_RTX, 0, OPTAB_WIDEN);

	    xor0 = lo[0];
	    if (lo[1] != const0_rtx)
	      xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
				   NULL_RTX, 0, OPTAB_WIDEN);

	    tmp = expand_binop (submode, ior_optab, xor1, xor0,
				NULL_RTX, 0, OPTAB_WIDEN);

	    ix86_expand_branch (code, tmp, const0_rtx, label);
	    return;
	  }

	/* Otherwise, if we are doing less-than or greater-or-equal-than,
	   op1 is a constant and the low word is zero, then we can just
	   examine the high word.  Similarly for low word -1 and
	   less-or-equal-than or greater-than.  */

	if (CONST_INT_P (hi[1]))
	  switch (code)
	    {
	    case LT: case LTU: case GE: case GEU:
	      if (lo[1] == const0_rtx)
		{
		  ix86_expand_branch (code, hi[0], hi[1], label);
		  return;
		}
	      break;
	    case LE: case LEU: case GT: case GTU:
	      if (lo[1] == constm1_rtx)
		{
		  ix86_expand_branch (code, hi[0], hi[1], label);
		  return;
		}
	      break;
	    default:
	      break;
	    }

	/* Otherwise, we need two or three jumps.  */

	label2 = gen_label_rtx ();

	code1 = code;
	code2 = swap_condition (code);
	code3 = unsigned_condition (code);

	switch (code)
	  {
	  case LT: case GT: case LTU: case GTU:
	    break;

	  case LE:   code1 = LT;  code2 = GT;  break;
	  case GE:   code1 = GT;  code2 = LT;  break;
	  case LEU:  code1 = LTU; code2 = GTU; break;
	  case GEU:  code1 = GTU; code2 = LTU; break;

	  case EQ:   code1 = UNKNOWN; code2 = NE;  break;
	  case NE:   code2 = UNKNOWN; break;

	  default:
	    gcc_unreachable ();
	  }

	/*
	 * a < b =>
	 *    if (hi(a) < hi(b)) goto true;
	 *    if (hi(a) > hi(b)) goto false;
	 *    if (lo(a) < lo(b)) goto true;
	 *  false:
	 */

	if (code1 != UNKNOWN)
	  ix86_expand_branch (code1, hi[0], hi[1], label);
	if (code2 != UNKNOWN)
	  ix86_expand_branch (code2, hi[0], hi[1], label2);

	ix86_expand_branch (code3, lo[0], lo[1], label);

	if (code2 != UNKNOWN)
	  emit_label (label2);
	return;
      }

    default:
      gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
      goto simple;
    }
}
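
/* Illustrative expansion (pseudo-code, not from the original source):
   a signed DImode a < b on a 32-bit target becomes

	if (hi(a) < hi(b)) goto Ltrue;	// code1 = LT on the high words
	if (hi(a) > hi(b)) goto Lfalse;	// code2 = GT to the local label
	if (lo(a) < lo(b)) goto Ltrue;	// code3 = LTU, low words unsigned
   Lfalse:

   which is why code3 is always the unsigned variant of CODE.  */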

/* Split branch based on floating point condition.  */
void
ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
		      rtx target1, rtx target2, rtx tmp, rtx pushed)
{
  rtx condition;
  rtx i;

  if (target2 != pc_rtx)
    {
      rtx tmp = target2;
      code = reverse_condition_maybe_unordered (code);
      target2 = target1;
      target1 = tmp;
    }

  condition = ix86_expand_fp_compare (code, op1, op2,
				      tmp);

  /* Remove pushed operand from stack.  */
  if (pushed)
    ix86_free_from_memory (GET_MODE (pushed));

  i = emit_jump_insn (gen_rtx_SET
		      (VOIDmode, pc_rtx,
		       gen_rtx_IF_THEN_ELSE (VOIDmode,
					     condition, target1, target2)));
  if (split_branch_probability >= 0)
    add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
}

void
ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  rtx ret;

  gcc_assert (GET_MODE (dest) == QImode);

  ret = ix86_expand_compare (code, op0, op1);
  PUT_MODE (ret, QImode);
  emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
}
/* Expand comparison setting or clearing carry flag.  Return true when
   successful and set pop for the operation.  */
static bool
ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
{
  enum machine_mode mode =
    GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);

  /* Do not handle double-mode compares that go through the special path.  */
  if (mode == (TARGET_64BIT ? TImode : DImode))
    return false;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      rtx compare_op, compare_seq;

      gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));

      /* Shortcut: the following common codes never translate
	 into carry flag compares.  */
      if (code == EQ || code == NE || code == UNEQ || code == LTGT
	  || code == ORDERED || code == UNORDERED)
	return false;

      /* These comparisons require zero flag; swap operands so they won't.  */
      if ((code == GT || code == UNLE || code == LE || code == UNGT)
	  && !TARGET_IEEE_FP)
	{
	  rtx tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	  code = swap_condition (code);
	}

      /* Try to expand the comparison and verify that we end up with a
	 carry flag based comparison.  This fails only when we decide to
	 expand the comparison using arithmetic, which is not a common
	 scenario.  */
      start_sequence ();
      compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
      compare_seq = get_insns ();
      end_sequence ();

      if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
	  || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
	code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
      else
	code = GET_CODE (compare_op);

      if (code != LTU && code != GEU)
	return false;

      emit_insn (compare_seq);
      *pop = compare_op;
      return true;
    }

  if (!INTEGRAL_MODE_P (mode))
    return false;

  switch (code)
    {
    case LTU:
    case GEU:
      break;

    /* Convert a==0 into (unsigned)a<1.  */
    case EQ:
    case NE:
      if (op1 != const0_rtx)
	return false;
      op1 = const1_rtx;
      code = (code == EQ ? LTU : GEU);
      break;
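      /* For instance, "a == 0" becomes "(unsigned) a < 1": the cmp
	 leaves the whole predicate in the carry flag, so consumers can
	 use sbb/adc instead of setcc.  */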
    /* Convert a>b into b<a or a>=b-1.  */
    case GTU:
    case LEU:
      if (CONST_INT_P (op1))
	{
	  op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
	  /* Bail out on overflow.  We still can swap operands but that
	     would force loading of the constant into a register.  */
	  if (op1 == const0_rtx
	      || !x86_64_immediate_operand (op1, GET_MODE (op1)))
	    return false;
	  code = (code == GTU ? GEU : LTU);
	}
      else
	{
	  rtx tmp = op1;
	  op1 = op0;
	  op0 = tmp;
	  code = (code == GTU ? LTU : GEU);
	}
      break;

    /* Convert a>=0 into (unsigned)a<0x80000000.  */
    case LT:
    case GE:
      if (mode == DImode || op1 != const0_rtx)
	return false;
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LT ? GEU : LTU);
      break;
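      /* E.g. SImode "a >= 0" becomes "(unsigned) a < 0x80000000"; the
	 sign bit of A is exactly the carry of that unsigned compare.  */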
    case LE:
    case GT:
      if (mode == DImode || op1 != constm1_rtx)
	return false;
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LE ? GEU : LTU);
      break;

    default:
      return false;
    }
  /* Swapping operands may cause a constant to appear as first operand.  */
  if (!nonimmediate_operand (op0, VOIDmode))
    {
      if (!can_create_pseudo_p ())
	return false;
      op0 = force_reg (mode, op0);
    }
  *pop = ix86_expand_compare (code, op0, op1);
  gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
  return true;
}
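/* A minimal plain-C sketch (not GCC internals) of the branchless
   selection that such a carry-flag compare enables in the callers
   below: once the predicate is LTU, the mask is just "cmp; sbb".

     unsigned sel (unsigned a, unsigned b, unsigned ct, unsigned cf)
     {
       unsigned m = a < b ? -1u : 0u;	(cmp a,b; sbb m,m)
       return (m & (ct - cf)) + cf;	(and; add)
     }
*/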
bool
ix86_expand_int_movcc (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[1]), compare_code;
  rtx compare_seq, compare_op;
  enum machine_mode mode = GET_MODE (operands[0]);
  bool sign_bit_compare_p = false;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (GET_MODE (op0) == TImode
      || (GET_MODE (op0) == DImode
	  && !TARGET_64BIT))
    return false;

  start_sequence ();
  compare_op = ix86_expand_compare (code, op0, op1);
  compare_seq = get_insns ();
  end_sequence ();

  compare_code = GET_CODE (compare_op);

  if ((op1 == const0_rtx && (code == GE || code == LT))
      || (op1 == constm1_rtx && (code == GT || code == LE)))
    sign_bit_compare_p = true;

  /* Don't attempt mode expansion here -- if we had to expand 5 or 6
     HImode insns, we'd be swallowed in word prefix ops.  */

  if ((mode != HImode || TARGET_FAST_PREFIX)
      && (mode != (TARGET_64BIT ? TImode : DImode))
      && CONST_INT_P (operands[2])
      && CONST_INT_P (operands[3]))
    {
      rtx out = operands[0];
      HOST_WIDE_INT ct = INTVAL (operands[2]);
      HOST_WIDE_INT cf = INTVAL (operands[3]);
      HOST_WIDE_INT diff;

      diff = ct - cf;
      /* Sign bit compares are better done using shifts than by using
	 an sbb.  */
      if (sign_bit_compare_p
	  || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
	{
	  /* Detect overlap between destination and compare sources.  */
	  rtx tmp = out;

	  if (!sign_bit_compare_p)
	    {
	      rtx flags;
	      bool fpcmp = false;

	      compare_code = GET_CODE (compare_op);

	      flags = XEXP (compare_op, 0);

	      if (GET_MODE (flags) == CCFPmode
		  || GET_MODE (flags) == CCFPUmode)
		{
		  fpcmp = true;
		  compare_code
		    = ix86_fp_compare_code_to_integer (compare_code);
		}

	      /* To simplify rest of code, restrict to the GEU case.  */
	      if (compare_code == LTU)
		{
		  HOST_WIDE_INT tmp = ct;
		  ct = cf;
		  cf = tmp;
		  compare_code = reverse_condition (compare_code);
		  code = reverse_condition (code);
		}
	      else
		{
		  if (fpcmp)
		    PUT_CODE (compare_op,
			      reverse_condition_maybe_unordered
			        (GET_CODE (compare_op)));
		  else
		    PUT_CODE (compare_op,
			      reverse_condition (GET_CODE (compare_op)));
		}
	      diff = ct - cf;

	      if (reg_overlap_mentioned_p (out, op0)
		  || reg_overlap_mentioned_p (out, op1))
		tmp = gen_reg_rtx (mode);

	      if (mode == DImode)
		emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
	      else
		emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
						 flags, compare_op));
	    }
	  else
	    {
	      if (code == GT || code == GE)
		code = reverse_condition (code);
	      else
		{
		  HOST_WIDE_INT tmp = ct;
		  ct = cf;
		  cf = tmp;
		  diff = ct - cf;
		}
	      tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
	    }

	  if (diff == 1)
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * [addl dest, ct]
	       *
	       * Size 5 - 8.
	       */
	      if (ct)
		tmp = expand_simple_binop (mode, PLUS,
					   tmp, GEN_INT (ct),
					   copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }
	  else if (cf == -1)
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * orl $ct, dest
	       *
	       * Size 8.
	       */
	      tmp = expand_simple_binop (mode, IOR,
					 tmp, GEN_INT (ct),
					 copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }
	  else if (diff == -1 && ct)
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * notl dest
	       * [addl dest, cf]
	       *
	       * Size 8 - 11.
	       */
	      tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
	      if (cf)
		tmp = expand_simple_binop (mode, PLUS,
					   copy_rtx (tmp), GEN_INT (cf),
					   copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }
	  else
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * [notl dest]
	       * andl cf - ct, dest
	       * [addl dest, ct]
	       *
	       * Size 8 - 11.
	       */
	      if (cf == 0)
		{
		  cf = ct;
		  ct = 0;
		  tmp = expand_simple_unop (mode, NOT, tmp,
					    copy_rtx (tmp), 1);
		}

	      tmp = expand_simple_binop (mode, AND,
					 copy_rtx (tmp),
					 gen_int_mode (cf - ct, mode),
					 copy_rtx (tmp), 1, OPTAB_DIRECT);
	      if (ct)
		tmp = expand_simple_binop (mode, PLUS,
					   copy_rtx (tmp), GEN_INT (ct),
					   copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }

	  if (!rtx_equal_p (tmp, out))
	    emit_move_insn (copy_rtx (out), copy_rtx (tmp));

	  return true;
	}
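      /* For instance, the diff == 1 shape above turns
	 "dest = (a >=u b) ? 6 : 5" into
	     cmp a, b; sbb dest, dest; add $6, dest
	 -- three instructions and no branch.  */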
      if (diff < 0)
	{
	  enum machine_mode cmp_mode = GET_MODE (op0);
	  HOST_WIDE_INT tmp;

	  tmp = ct, ct = cf, cf = tmp;
	  diff = -diff;

	  if (SCALAR_FLOAT_MODE_P (cmp_mode))
	    {
	      gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));

	      /* We may be reversing unordered compare to normal compare, that
		 is not valid in general (we may convert non-trapping condition
		 to trapping one), however on i386 we currently emit all
		 comparisons unordered.  */
	      compare_code = reverse_condition_maybe_unordered (compare_code);
	      code = reverse_condition_maybe_unordered (code);
	    }
	  else
	    {
	      compare_code = reverse_condition (compare_code);
	      code = reverse_condition (code);
	    }
	}

      compare_code = UNKNOWN;
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
	  && CONST_INT_P (op1))
	{
	  if (op1 == const0_rtx
	      && (code == LT || code == GE))
	    compare_code = code;
	  else if (op1 == constm1_rtx)
	    {
	      if (code == LE)
		compare_code = LT;
	      else if (code == GT)
		compare_code = GE;
	    }
	}

      /* Optimize dest = (op0 < 0) ? -1 : cf.  */
      if (compare_code != UNKNOWN
	  && GET_MODE (op0) == GET_MODE (out)
	  && (cf == -1 || ct == -1))
	{
	  /* If the lea code below could be used, only optimize
	     if it results in a 2 insn sequence.  */
	  if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
		 || diff == 3 || diff == 5 || diff == 9)
	      || (compare_code == LT && ct == -1)
	      || (compare_code == GE && cf == -1))
	    {
	      /*
	       * notl op1	(if necessary)
	       * sarl $31, op1
	       * orl cf, op1
	       */
	      if (ct != -1)
		{
		  cf = ct;
		  ct = -1;
		  code = reverse_condition (code);
		}

	      out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);

	      out = expand_simple_binop (mode, IOR,
					 out, GEN_INT (cf),
					 out, 1, OPTAB_DIRECT);
	      if (out != operands[0])
		emit_move_insn (operands[0], out);

	      return true;
	    }
	}

      if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
	   || diff == 3 || diff == 5 || diff == 9)
	  && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
	  && (mode != DImode
	      || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
	{
	  /*
	   * xorl dest,dest
	   * cmpl op1,op2
	   * setcc dest
	   * lea cf(dest*(ct-cf)),dest
	   *
	   * Size 14.
	   *
	   * This also catches the degenerate setcc-only case.
	   */

	  rtx tmp;
	  int nops;

	  out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);

	  nops = 0;
	  /* On x86_64 the lea instruction operates on Pmode, so we need
	     to get arithmetics done in the proper mode to match.  */
	  if (diff == 1)
	    tmp = copy_rtx (out);
	  else
	    {
	      rtx out1;
	      out1 = copy_rtx (out);
	      tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
	      nops++;
	      if (diff & 1)
		{
		  tmp = gen_rtx_PLUS (mode, tmp, out1);
		  nops++;
		}
	    }
	  if (cf != 0)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
	      nops++;
	    }
	  if (!rtx_equal_p (tmp, out))
	    {
	      if (nops == 1)
		out = force_operand (tmp, copy_rtx (out));
	      else
		emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out),
					copy_rtx (tmp)));
	    }
	  if (!rtx_equal_p (out, operands[0]))
	    emit_move_insn (operands[0], copy_rtx (out));

	  return true;
	}
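      /* Example of the lea form above: "dest = cc ? 13 : 4" has
	 diff == 9, so after "setcc dest" a single
	 "lea 4(dest,dest,8), dest" turns the 0/1 flag into 4 or 13.  */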
      /*
       * General case:			Jumpful:
       *   xorl dest,dest		cmpl op1, op2
       *   cmpl op1, op2		movl ct, dest
       *   setcc dest			jcc 1f
       *   decl dest			movl cf, dest
       *   andl (cf-ct),dest		1:
       *   addl ct,dest
       *
       * Size 20.			Size 14.
       *
       * This is reasonably steep, but branch mispredict costs are
       * high on modern cpus, so consider failing only if optimizing
       * for space.
       */

      if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
	  && BRANCH_COST (optimize_insn_for_speed_p (),
			  false) >= 2)
	{
	  if (cf == 0)
	    {
	      enum machine_mode cmp_mode = GET_MODE (op0);

	      cf = ct;
	      ct = 0;

	      if (SCALAR_FLOAT_MODE_P (cmp_mode))
		{
		  gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));

		  /* We may be reversing unordered compare to normal compare,
		     that is not valid in general (we may convert non-trapping
		     condition to trapping one), however on i386 we currently
		     emit all comparisons unordered.  */
		  code = reverse_condition_maybe_unordered (code);
		}
	      else
		{
		  code = reverse_condition (code);
		  if (compare_code != UNKNOWN)
		    compare_code = reverse_condition (compare_code);
		}
	    }

	  if (compare_code != UNKNOWN)
	    {
	      /* notl op1	(if needed)
		 sarl $31, op1
		 andl (cf-ct), op1
		 addl ct, op1

		 For x < 0 (resp. x <= -1) there will be no notl,
		 so if possible swap the constants to get rid of the
		 complement.
		 True/false will be -1/0 while the code below (store flag
		 followed by decrement) is 0/-1, so the constants need
		 to be exchanged once more.  */

	      if (compare_code == GE || !cf)
		{
		  code = reverse_condition (code);
		  compare_code = LT;
		}
	      else
		{
		  HOST_WIDE_INT tmp = cf;
		  cf = ct;
		  ct = tmp;
		}

	      out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
	    }
	  else
	    {
	      out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);

	      out = expand_simple_binop (mode, PLUS, copy_rtx (out),
					 constm1_rtx,
					 copy_rtx (out), 1, OPTAB_DIRECT);
	    }

	  out = expand_simple_binop (mode, AND, copy_rtx (out),
				     gen_int_mode (cf - ct, mode),
				     copy_rtx (out), 1, OPTAB_DIRECT);
	  if (ct)
	    out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
				       copy_rtx (out), 1, OPTAB_DIRECT);
	  if (!rtx_equal_p (out, operands[0]))
	    emit_move_insn (operands[0], copy_rtx (out));

	  return true;
	}
    }
  if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
    {
      /* Try a few things more with specific constants and a variable.  */

      optab op;
      rtx var, orig_out, out, tmp;

      if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
	return false;

      /* If one of the two operands is an interesting constant, load a
	 constant with the above and mask it in with a logical operation.  */

      if (CONST_INT_P (operands[2]))
	{
	  var = operands[3];
	  if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
	    operands[3] = constm1_rtx, op = and_optab;
	  else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
	    operands[3] = const0_rtx, op = ior_optab;
	  else
	    return false;
	}
      else if (CONST_INT_P (operands[3]))
	{
	  var = operands[2];
	  if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
	    operands[2] = constm1_rtx, op = and_optab;
	  else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
	    operands[2] = const0_rtx, op = ior_optab;
	  else
	    return false;
	}
      else
	return false;

      orig_out = operands[0];
      tmp = gen_reg_rtx (mode);
      operands[0] = tmp;

      /* Recurse to get the constant loaded.  */
      if (ix86_expand_int_movcc (operands) == 0)
	return false;

      /* Mask in the interesting variable.  */
      out = expand_binop (mode, op, var, tmp, orig_out, 0,
			  OPTAB_WIDEN);
      if (!rtx_equal_p (out, orig_out))
	emit_move_insn (copy_rtx (orig_out), copy_rtx (out));

      return true;
    }
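  /* Example: "dest = cc ? 0 : var" is handled above as
     "tmp = cc ? 0 : -1; dest = var & tmp": the all-constant half is
     produced branch-free by the recursive call, and the variable is
     masked in with a single AND.  */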
  /*
   * For comparison with above,
   *
   * movl cf,dest
   * movl ct,tmp
   * cmpl op1,op2
   * cmovcc tmp,dest
   *
   * Size 15.
   */

  if (! nonimmediate_operand (operands[2], mode))
    operands[2] = force_reg (mode, operands[2]);
  if (! nonimmediate_operand (operands[3], mode))
    operands[3] = force_reg (mode, operands[3]);

  if (! register_operand (operands[2], VOIDmode)
      && (mode == QImode
	  || ! register_operand (operands[3], VOIDmode)))
    operands[2] = force_reg (mode, operands[2]);

  if (mode == QImode
      && ! register_operand (operands[3], VOIDmode))
    operands[3] = force_reg (mode, operands[3]);

  emit_insn (compare_seq);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
			  gen_rtx_IF_THEN_ELSE (mode,
						compare_op, operands[2],
						operands[3])));
  return true;
}
/* Swap, force into registers, or otherwise massage the two operands
   to an sse comparison with a mask result.  Thus we differ a bit from
   ix86_prepare_fp_compare_args which expects to produce a flags result.

   The DEST operand exists to help determine whether to commute commutative
   operators.  The POP0/POP1 operands are updated in place.  The new
   comparison code is returned, or UNKNOWN if not implementable.  */

static enum rtx_code
ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
				  rtx *pop0, rtx *pop1)
{
  rtx tmp;

  switch (code)
    {
    case LTGT:
    case UNEQ:
      /* AVX supports all the needed comparisons.  */
      if (TARGET_AVX)
	break;
      /* We have no LTGT as an operator.  We could implement it with
	 NE & ORDERED, but this requires an extra temporary.  It's
	 not clear that it's worth it.  */
      return UNKNOWN;

    case LT:
    case LE:
    case UNGT:
    case UNGE:
      /* These are supported directly.  */
      break;

    case EQ:
    case NE:
    case UNORDERED:
    case ORDERED:
      /* AVX has 3 operand comparisons, no need to swap anything.  */
      if (TARGET_AVX)
	break;
      /* For commutative operators, try to canonicalize the destination
	 operand to be first in the comparison - this helps reload to
	 avoid extra moves.  */
      if (!dest || !rtx_equal_p (dest, *pop1))
	break;
      /* FALLTHRU */

    case GE:
    case GT:
    case UNLE:
    case UNLT:
      /* These are not supported directly before AVX, and furthermore
	 ix86_expand_sse_fp_minmax only optimizes LT/UNGE.  Swap the
	 comparison operands to transform into something that is
	 supported.  */
      tmp = *pop0;
      *pop0 = *pop1;
      *pop1 = tmp;
      code = swap_condition (code);
      break;

    default:
      gcc_unreachable ();
    }

  return code;
}
/* Detect conditional moves that exactly match min/max operational
   semantics.  Note that this is IEEE safe, as long as we don't
   interchange the operands.

   Returns FALSE if this conditional move doesn't match a MIN/MAX,
   and TRUE if the operation is successful and instructions are emitted.  */

static bool
ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
			   rtx cmp_op1, rtx if_true, rtx if_false)
{
  enum machine_mode mode;
  bool is_min;
  rtx tmp;

  if (code == LT)
    ;
  else if (code == UNGE)
    {
      tmp = if_true;
      if_true = if_false;
      if_false = tmp;
    }
  else
    return false;

  if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
    is_min = true;
  else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
    is_min = false;
  else
    return false;

  mode = GET_MODE (dest);

  /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
     but MODE may be a vector mode and thus not appropriate.  */
  if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
    {
      int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
      rtvec v;

      if_true = force_reg (mode, if_true);
      v = gen_rtvec (2, if_true, if_false);
      tmp = gen_rtx_UNSPEC (mode, v, u);
    }
  else
    {
      code = is_min ? SMIN : SMAX;
      tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
  return true;
}
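/* E.g. "d = a < b ? a : b" is the min form (is_min), while
   "d = a < b ? b : a" is the max form; the hardware min/max keeps the
   same operand order, which is what makes this IEEE safe when one
   input is a NaN.  */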
/* Expand an sse vector comparison.  Return the register with the result.  */

static rtx
ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
		     rtx op_true, rtx op_false)
{
  enum machine_mode mode = GET_MODE (dest);
  enum machine_mode cmp_mode = GET_MODE (cmp_op0);
  rtx x;

  cmp_op0 = force_reg (cmp_mode, cmp_op0);
  if (!nonimmediate_operand (cmp_op1, cmp_mode))
    cmp_op1 = force_reg (cmp_mode, cmp_op1);

  if (optimize
      || reg_overlap_mentioned_p (dest, op_true)
      || reg_overlap_mentioned_p (dest, op_false))
    dest = gen_reg_rtx (mode);

  x = gen_rtx_fmt_ee (code, cmp_mode, cmp_op0, cmp_op1);
  if (cmp_mode != mode)
    {
      x = force_reg (cmp_mode, x);
      convert_move (dest, x, false);
    }
  else
    emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  return dest;
}
/* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
   operations.  This is used for both scalar and vector conditional moves.  */

static void
ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
{
  enum machine_mode mode = GET_MODE (dest);
  rtx t2, t3, x;

  if (vector_all_ones_operand (op_true, mode)
      && rtx_equal_p (op_false, CONST0_RTX (mode)))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, cmp));
    }
  else if (op_false == CONST0_RTX (mode))
    {
      op_true = force_reg (mode, op_true);
      x = gen_rtx_AND (mode, cmp, op_true);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else if (op_true == CONST0_RTX (mode))
    {
      op_false = force_reg (mode, op_false);
      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, op_false);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else if (INTEGRAL_MODE_P (mode) && op_true == CONSTM1_RTX (mode))
    {
      op_false = force_reg (mode, op_false);
      x = gen_rtx_IOR (mode, cmp, op_false);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else if (TARGET_XOP)
    {
      op_true = force_reg (mode, op_true);

      if (!nonimmediate_operand (op_false, mode))
	op_false = force_reg (mode, op_false);

      emit_insn (gen_rtx_SET (mode, dest,
			      gen_rtx_IF_THEN_ELSE (mode, cmp,
						    op_true, op_false)));
    }
  else
    {
      rtx (*gen) (rtx, rtx, rtx, rtx) = NULL;

      if (!nonimmediate_operand (op_true, mode))
	op_true = force_reg (mode, op_true);

      op_false = force_reg (mode, op_false);

      switch (mode)
	{
	case V4SFmode:
	  if (TARGET_SSE4_1)
	    gen = gen_sse4_1_blendvps;
	  break;
	case V2DFmode:
	  if (TARGET_SSE4_1)
	    gen = gen_sse4_1_blendvpd;
	  break;
	case V16QImode:
	case V8HImode:
	case V4SImode:
	case V2DImode:
	  if (TARGET_SSE4_1)
	    {
	      gen = gen_sse4_1_pblendvb;
	      dest = gen_lowpart (V16QImode, dest);
	      op_false = gen_lowpart (V16QImode, op_false);
	      op_true = gen_lowpart (V16QImode, op_true);
	      cmp = gen_lowpart (V16QImode, cmp);
	    }
	  break;
	case V8SFmode:
	  if (TARGET_AVX)
	    gen = gen_avx_blendvps256;
	  break;
	case V4DFmode:
	  if (TARGET_AVX)
	    gen = gen_avx_blendvpd256;
	  break;
	case V32QImode:
	case V16HImode:
	case V8SImode:
	case V4DImode:
	  if (TARGET_AVX2)
	    {
	      gen = gen_avx2_pblendvb;
	      dest = gen_lowpart (V32QImode, dest);
	      op_false = gen_lowpart (V32QImode, op_false);
	      op_true = gen_lowpart (V32QImode, op_true);
	      cmp = gen_lowpart (V32QImode, cmp);
	    }
	  break;
	default:
	  break;
	}

      if (gen != NULL)
	emit_insn (gen (dest, op_false, op_true, cmp));
      else
	{
	  op_true = force_reg (mode, op_true);

	  t2 = gen_reg_rtx (mode);
	  if (optimize)
	    t3 = gen_reg_rtx (mode);
	  else
	    t3 = dest;

	  x = gen_rtx_AND (mode, op_true, cmp);
	  emit_insn (gen_rtx_SET (VOIDmode, t2, x));

	  x = gen_rtx_NOT (mode, cmp);
	  x = gen_rtx_AND (mode, x, op_false);
	  emit_insn (gen_rtx_SET (VOIDmode, t3, x));

	  x = gen_rtx_IOR (mode, t3, t2);
	  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
	}
    }
}
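/* A minimal plain-C sketch (not GCC internals) of the fallback blend
   above, with CMP already an all-ones/all-zeros mask per element:

     unsigned blend (unsigned cmp, unsigned t, unsigned f)
     {
       return (t & cmp) | (f & ~cmp);	(pand; pandn; por)
     }
*/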
/* Expand a floating-point conditional move.  Return true if successful.  */

bool
ix86_expand_fp_movcc (rtx operands[])
{
  enum machine_mode mode = GET_MODE (operands[0]);
  enum rtx_code code = GET_CODE (operands[1]);
  rtx tmp, compare_op;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
    {
      enum machine_mode cmode;

      /* Since we've no cmove for sse registers, don't force bad register
	 allocation just to gain access to it.  Deny movcc when the
	 comparison mode doesn't match the move mode.  */
      cmode = GET_MODE (op0);
      if (cmode == VOIDmode)
	cmode = GET_MODE (op1);
      if (cmode != mode)
	return false;

      code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
      if (code == UNKNOWN)
	return false;

      if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
				     operands[2], operands[3]))
	return true;

      tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
				 operands[2], operands[3]);
      ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
      return true;
    }

  if (GET_MODE (op0) == TImode
      || (GET_MODE (op0) == DImode
	  && !TARGET_64BIT))
    return false;

  /* The floating point conditional move instructions don't directly
     support conditions resulting from a signed integer comparison.  */

  compare_op = ix86_expand_compare (code, op0, op1);
  if (!fcmov_comparison_operator (compare_op, VOIDmode))
    {
      tmp = gen_reg_rtx (QImode);
      ix86_expand_setcc (tmp, code, op0, op1);

      compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
    }

  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
			  gen_rtx_IF_THEN_ELSE (mode, compare_op,
						operands[2], operands[3])));

  return true;
}
/* Expand a floating-point vector conditional move; a vcond operation
   rather than a movcc operation.  */

bool
ix86_expand_fp_vcond (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[3]);
  rtx cmp;

  code = ix86_prepare_sse_fp_compare_args (operands[0], code,
					   &operands[4], &operands[5]);
  if (code == UNKNOWN)
    {
      rtx temp;
      switch (GET_CODE (operands[3]))
	{
	case LTGT:
	  temp = ix86_expand_sse_cmp (operands[0], ORDERED, operands[4],
				      operands[5], operands[0], operands[0]);
	  cmp = ix86_expand_sse_cmp (operands[0], NE, operands[4],
				     operands[5], operands[1], operands[2]);
	  code = AND;
	  break;
	case UNEQ:
	  temp = ix86_expand_sse_cmp (operands[0], UNORDERED, operands[4],
				      operands[5], operands[0], operands[0]);
	  cmp = ix86_expand_sse_cmp (operands[0], EQ, operands[4],
				     operands[5], operands[1], operands[2]);
	  code = IOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      cmp = expand_simple_binop (GET_MODE (cmp), code, temp, cmp, cmp, 1,
				 OPTAB_DIRECT);
      ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
      return true;
    }

  if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
				 operands[5], operands[1], operands[2]))
    return true;

  cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
			     operands[1], operands[2]);
  ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
  return true;
}
/* Expand a signed/unsigned integral vector conditional move.  */

bool
ix86_expand_int_vcond (rtx operands[])
{
  enum machine_mode data_mode = GET_MODE (operands[0]);
  enum machine_mode mode = GET_MODE (operands[4]);
  enum rtx_code code = GET_CODE (operands[3]);
  bool negate = false;
  rtx x, cop0, cop1;

  cop0 = operands[4];
  cop1 = operands[5];

  /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
     and x < 0 ? 1 : 0 into (unsigned) x >> 31.  */
  if ((code == LT || code == GE)
      && data_mode == mode
      && cop1 == CONST0_RTX (mode)
      && operands[1 + (code == LT)] == CONST0_RTX (data_mode)
      && GET_MODE_SIZE (GET_MODE_INNER (data_mode)) > 1
      && GET_MODE_SIZE (GET_MODE_INNER (data_mode)) <= 8
      && (GET_MODE_SIZE (data_mode) == 16
	  || (TARGET_AVX2 && GET_MODE_SIZE (data_mode) == 32)))
    {
      rtx negop = operands[2 - (code == LT)];
      int shift = GET_MODE_BITSIZE (GET_MODE_INNER (data_mode)) - 1;
      if (negop == CONST1_RTX (data_mode))
	{
	  rtx res = expand_simple_binop (mode, LSHIFTRT, cop0,
					 GEN_INT (shift),
					 operands[0], 1, OPTAB_DIRECT);
	  if (res != operands[0])
	    emit_move_insn (operands[0], res);
	  return true;
	}
      else if (GET_MODE_INNER (data_mode) != DImode
	       && vector_all_ones_operand (negop, data_mode))
	{
	  rtx res = expand_simple_binop (mode, ASHIFTRT, cop0,
					 GEN_INT (shift),
					 operands[0], 0, OPTAB_DIRECT);
	  if (res != operands[0])
	    emit_move_insn (operands[0], res);
	  return true;
	}
    }
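  /* E.g. a V4SImode "x < 0 ? -1 : 0" is just "psrad $31, x" and
     "x < 0 ? 1 : 0" is "psrld $31, x"; one shift replaces the whole
     compare-and-blend sequence.  */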
  if (!nonimmediate_operand (cop1, mode))
    cop1 = force_reg (mode, cop1);
  if (!general_operand (operands[1], data_mode))
    operands[1] = force_reg (data_mode, operands[1]);
  if (!general_operand (operands[2], data_mode))
    operands[2] = force_reg (data_mode, operands[2]);

  /* XOP supports all of the comparisons on all 128-bit vector int types.  */
  if (TARGET_XOP
      && (mode == V16QImode || mode == V8HImode
	  || mode == V4SImode || mode == V2DImode))
    ;
  else
    {
      /* Canonicalize the comparison to EQ, GT, GTU.  */
      switch (code)
	{
	case EQ:
	case GT:
	case GTU:
	  break;

	case NE:
	case LE:
	case LEU:
	  code = reverse_condition (code);
	  negate = true;
	  break;

	case GE:
	case GEU:
	  code = reverse_condition (code);
	  negate = true;
	  /* FALLTHRU */

	case LT:
	case LTU:
	  code = swap_condition (code);
	  x = cop0, cop0 = cop1, cop1 = x;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Only SSE4.1/SSE4.2 supports V2DImode.  */
      if (mode == V2DImode)
	{
	  switch (code)
	    {
	    case EQ:
	      /* SSE4.1 supports EQ.  */
	      if (!TARGET_SSE4_1)
		return false;
	      break;

	    case GT:
	    case GTU:
	      /* SSE4.2 supports GT/GTU.  */
	      if (!TARGET_SSE4_2)
		return false;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Unsigned parallel compare is not supported by the hardware.
	 Play some tricks to turn this into a signed comparison
	 against 0.  */
      if (code == GTU)
	{
	  cop0 = force_reg (mode, cop0);

	  switch (mode)
	    {
	    case V8SImode:
	    case V4DImode:
	    case V4SImode:
	    case V2DImode:
	      {
		rtx t1, t2, mask;
		rtx (*gen_sub3) (rtx, rtx, rtx);

		switch (mode)
		  {
		  case V8SImode: gen_sub3 = gen_subv8si3; break;
		  case V4DImode: gen_sub3 = gen_subv4di3; break;
		  case V4SImode: gen_sub3 = gen_subv4si3; break;
		  case V2DImode: gen_sub3 = gen_subv2di3; break;
		  default:
		    gcc_unreachable ();
		  }
		/* Subtract (-(INT MAX) - 1) from both operands to make
		   them signed.  */
		mask = ix86_build_signbit_mask (mode, true, false);
		t1 = gen_reg_rtx (mode);
		emit_insn (gen_sub3 (t1, cop0, mask));

		t2 = gen_reg_rtx (mode);
		emit_insn (gen_sub3 (t2, cop1, mask));
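		/* The bias makes unsigned order agree with signed order:
		   with 32-bit lanes, (a - 0x80000000) >s (b - 0x80000000)
		   iff a >u b, so a plain pcmpgtd finishes the job.  */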
		cop0 = t1;
		cop1 = t2;
		code = GT;
	      }
	      break;

	    case V16QImode:
	    case V8HImode:
	      /* Perform a parallel unsigned saturating subtraction.  */
	      x = gen_reg_rtx (mode);
	      emit_insn (gen_rtx_SET (VOIDmode, x,
				      gen_rtx_US_MINUS (mode, cop0, cop1)));

	      cop0 = x;
	      cop1 = CONST0_RTX (mode);
	      code = EQ;
	      negate = !negate;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* Allow the comparison to be done in one mode, but the movcc to
     happen in another mode.  */
  if (data_mode == mode)
    {
      x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
			       operands[1+negate], operands[2-negate]);
    }
  else
    {
      gcc_assert (GET_MODE_SIZE (data_mode) == GET_MODE_SIZE (mode));
      x = ix86_expand_sse_cmp (gen_lowpart (mode, operands[0]),
			       code, cop0, cop1,
			       operands[1+negate], operands[2-negate]);
      x = gen_lowpart (data_mode, x);
    }

  ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
			 operands[2-negate]);
  return true;
}
/* Expand a variable vector permutation.  */

void
ix86_expand_vec_perm (rtx operands[])
{
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx mask = operands[3];
  rtx t1, t2, t3, t4, vt, vt2, vec[32];
  enum machine_mode mode = GET_MODE (op0);
  enum machine_mode maskmode = GET_MODE (mask);
  int w, e, i;
  bool one_operand_shuffle = rtx_equal_p (op0, op1);

  /* Number of elements in the vector.  */
  w = GET_MODE_NUNITS (mode);
  e = GET_MODE_UNIT_SIZE (mode);
  gcc_assert (w <= 32);

  if (TARGET_AVX2)
    {
      if (mode == V4DImode || mode == V4DFmode || mode == V16HImode)
	{
	  /* Unfortunately, the VPERMQ and VPERMPD instructions only support
	     a constant shuffle operand.  With a tiny bit of effort we can
	     use VPERMD instead.  A re-interpretation stall for V4DFmode is
	     unfortunate but there's no avoiding it.
	     Similarly, for V16HImode we don't have instructions for
	     variable shuffling, while for V32QImode we can use, after
	     preparing suitable masks, vpshufb; vpshufb; vpermq; vpor.  */

	  if (mode == V16HImode)
	    {
	      maskmode = mode = V32QImode;
	      w = 32;
	      e = 1;
	    }
	  else
	    {
	      maskmode = mode = V8SImode;
	      w = 8;
	      e = 4;
	    }
	  t1 = gen_reg_rtx (maskmode);

	  /* Replicate the low bits of the V4DImode mask into V8SImode:
	       mask = { A B C D }
	       t1 = { A A B B C C D D }.  */
	  for (i = 0; i < w / 2; ++i)
	    vec[i*2 + 1] = vec[i*2] = GEN_INT (i * 2);
	  vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
	  vt = force_reg (maskmode, vt);
	  mask = gen_lowpart (maskmode, mask);
	  if (maskmode == V8SImode)
	    emit_insn (gen_avx2_permvarv8si (t1, mask, vt));
	  else
	    emit_insn (gen_avx2_pshufbv32qi3 (t1, mask, vt));

	  /* Multiply the shuffle indices by two.  */
	  t1 = expand_simple_binop (maskmode, PLUS, t1, t1, t1, 1,
				    OPTAB_DIRECT);

	  /* Add one to the odd shuffle indices:
		t1 = { A*2, A*2+1, B*2, B*2+1, ... }.  */
	  for (i = 0; i < w / 2; ++i)
	    {
	      vec[i * 2] = const0_rtx;
	      vec[i * 2 + 1] = const1_rtx;
	    }
	  vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
	  vt = force_const_mem (maskmode, vt);
	  t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1,
				    OPTAB_DIRECT);

	  /* Continue as if V8SImode (resp. V32QImode) was used initially.  */
	  operands[3] = mask = t1;
	  target = gen_lowpart (mode, target);
	  op0 = gen_lowpart (mode, op0);
	  op1 = gen_lowpart (mode, op1);
	}
      switch (mode)
	{
	case V8SImode:
	  /* The VPERMD and VPERMPS instructions already properly ignore
	     the high bits of the shuffle elements.  No need for us to
	     perform an AND ourselves.  */
	  if (one_operand_shuffle)
	    emit_insn (gen_avx2_permvarv8si (target, op0, mask));
	  else
	    {
	      t1 = gen_reg_rtx (V8SImode);
	      t2 = gen_reg_rtx (V8SImode);
	      emit_insn (gen_avx2_permvarv8si (t1, op0, mask));
	      emit_insn (gen_avx2_permvarv8si (t2, op1, mask));
	      goto merge_two;
	    }
	  return;

	case V8SFmode:
	  mask = gen_lowpart (V8SFmode, mask);
	  if (one_operand_shuffle)
	    emit_insn (gen_avx2_permvarv8sf (target, op0, mask));
	  else
	    {
	      t1 = gen_reg_rtx (V8SFmode);
	      t2 = gen_reg_rtx (V8SFmode);
	      emit_insn (gen_avx2_permvarv8sf (t1, op0, mask));
	      emit_insn (gen_avx2_permvarv8sf (t2, op1, mask));
	      goto merge_two;
	    }
	  return;

	case V4SImode:
	  /* By combining the two 128-bit input vectors into one 256-bit
	     input vector, we can use VPERMD and VPERMPS for the full
	     two-operand shuffle.  */
	  t1 = gen_reg_rtx (V8SImode);
	  t2 = gen_reg_rtx (V8SImode);
	  emit_insn (gen_avx_vec_concatv8si (t1, op0, op1));
	  emit_insn (gen_avx_vec_concatv8si (t2, mask, mask));
	  emit_insn (gen_avx2_permvarv8si (t1, t1, t2));
	  emit_insn (gen_avx_vextractf128v8si (target, t1, const0_rtx));
	  return;

	case V4SFmode:
	  t1 = gen_reg_rtx (V8SFmode);
	  t2 = gen_reg_rtx (V8SImode);
	  mask = gen_lowpart (V4SImode, mask);
	  emit_insn (gen_avx_vec_concatv8sf (t1, op0, op1));
	  emit_insn (gen_avx_vec_concatv8si (t2, mask, mask));
	  emit_insn (gen_avx2_permvarv8sf (t1, t1, t2));
	  emit_insn (gen_avx_vextractf128v8sf (target, t1, const0_rtx));
	  return;

	case V32QImode:
	  t1 = gen_reg_rtx (V32QImode);
	  t2 = gen_reg_rtx (V32QImode);
	  t3 = gen_reg_rtx (V32QImode);
	  vt2 = GEN_INT (128);
	  for (i = 0; i < 32; i++)
	    vec[i] = vt2;
	  vt = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, vec));
	  vt = force_reg (V32QImode, vt);
	  for (i = 0; i < 32; i++)
	    vec[i] = i < 16 ? vt2 : const0_rtx;
	  vt2 = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, vec));
	  vt2 = force_reg (V32QImode, vt2);
	  /* From mask create two adjusted masks, which contain the same
	     bits as mask in the low 7 bits of each vector element.
	     The first mask will have the most significant bit clear
	     if it requests an element from the same 128-bit lane
	     and the MSB set if it requests an element from the other
	     128-bit lane.
	     The second mask will have the opposite values of the MSB,
	     and additionally will have its 128-bit lanes swapped.
	     E.g. a { 07 12 1e 09 ... | 17 19 05 1f ... } mask vector
	     will have
	     t1 { 07 92 9e 09 ... | 17 19 85 1f ... } and
	     t3 { 97 99 05 9f ... | 87 12 1e 89 ... } where each ...
	     stands for the other 12 bytes.  */
	  /* Whether an element comes from the same lane or the other
	     lane is bit 4, so shift it up by 3 to the MSB position.  */
	  emit_insn (gen_ashlv4di3 (gen_lowpart (V4DImode, t1),
				    gen_lowpart (V4DImode, mask),
				    GEN_INT (3)));
	  /* Clear MSB bits from the mask just in case it had them set.  */
	  emit_insn (gen_avx2_andnotv32qi3 (t2, vt, mask));
	  /* After this t1 will have the MSB set for elements from the
	     other lane.  */
	  emit_insn (gen_xorv32qi3 (t1, t1, vt2));
	  /* Clear bits other than MSB.  */
	  emit_insn (gen_andv32qi3 (t1, t1, vt));
	  /* Or in the lower bits from mask into t3.  */
	  emit_insn (gen_iorv32qi3 (t3, t1, t2));
	  /* And invert MSB bits in t1, so MSB is set for elements from
	     the same lane.  */
	  emit_insn (gen_xorv32qi3 (t1, t1, vt));
	  /* Swap 128-bit lanes in t3.  */
	  emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
					  gen_lowpart (V4DImode, t3),
					  const2_rtx, GEN_INT (3),
					  const0_rtx, const1_rtx));
	  /* And or in the lower bits from mask into t1.  */
	  emit_insn (gen_iorv32qi3 (t1, t1, t2));
	  if (one_operand_shuffle)
	    {
	      /* Each of these shuffles will put 0s in places where
		 an element from the other 128-bit lane is needed;
		 otherwise it shuffles in the requested value.  */
	      emit_insn (gen_avx2_pshufbv32qi3 (t3, op0, t3));
	      emit_insn (gen_avx2_pshufbv32qi3 (t1, op0, t1));
	      /* For t3 the 128-bit lanes are swapped again.  */
	      emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
					      gen_lowpart (V4DImode, t3),
					      const2_rtx, GEN_INT (3),
					      const0_rtx, const1_rtx));
	      /* And oring both together leads to the result.  */
	      emit_insn (gen_iorv32qi3 (target, t1, t3));
	      return;
	    }

	  t4 = gen_reg_rtx (V32QImode);
	  /* Similar to the one_operand_shuffle code above, just repeated
	     twice, once for each operand.  The merge_two: code below will
	     merge the two results together.  */
	  emit_insn (gen_avx2_pshufbv32qi3 (t4, op0, t3));
	  emit_insn (gen_avx2_pshufbv32qi3 (t3, op1, t3));
	  emit_insn (gen_avx2_pshufbv32qi3 (t2, op0, t1));
	  emit_insn (gen_avx2_pshufbv32qi3 (t1, op1, t1));
	  emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t4),
					  gen_lowpart (V4DImode, t4),
					  const2_rtx, GEN_INT (3),
					  const0_rtx, const1_rtx));
	  emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
					  gen_lowpart (V4DImode, t3),
					  const2_rtx, GEN_INT (3),
					  const0_rtx, const1_rtx));
	  emit_insn (gen_iorv32qi3 (t4, t2, t4));
	  emit_insn (gen_iorv32qi3 (t3, t1, t3));
	  t1 = t4;
	  t2 = t3;
	  goto merge_two;
	default:
	  gcc_assert (GET_MODE_SIZE (mode) <= 16);
	  break;
	}
    }

  if (TARGET_XOP)
    {
      /* The XOP VPPERM insn supports three inputs.  By ignoring the
	 one_operand_shuffle special case, we avoid creating another
	 set of constant vectors in memory.  */
      one_operand_shuffle = false;

      /* mask = mask & {2*w-1, ...} */
      vt = GEN_INT (2*w - 1);
    }
  else
    {
      /* mask = mask & {w-1, ...} */
      vt = GEN_INT (w - 1);
    }

  for (i = 0; i < w; i++)
    vec[i] = vt;
  vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
  mask = expand_simple_binop (maskmode, AND, mask, vt,
			      NULL_RTX, 0, OPTAB_DIRECT);

  /* For non-QImode operations, convert the word permutation control
     into a byte permutation control.  */
  if (mode != V16QImode)
    {
      mask = expand_simple_binop (maskmode, ASHIFT, mask,
				  GEN_INT (exact_log2 (e)),
				  NULL_RTX, 0, OPTAB_DIRECT);

      /* Convert mask to vector of chars.  */
      mask = force_reg (V16QImode, gen_lowpart (V16QImode, mask));

      /* Replicate each of the input bytes into byte positions:
	 (v2di) --> {0,0,0,0,0,0,0,0, 8,8,8,8,8,8,8,8}
	 (v4si) --> {0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12}
	 (v8hi) --> {0,0, 2,2, 4,4, 6,6, ...}.  */
      for (i = 0; i < 16; ++i)
	vec[i] = GEN_INT (i/e * e);
      vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
      vt = force_const_mem (V16QImode, vt);
      if (TARGET_XOP)
	emit_insn (gen_xop_pperm (mask, mask, mask, vt));
      else
	emit_insn (gen_ssse3_pshufbv16qi3 (mask, mask, vt));

      /* Convert it into the byte positions by doing
	 mask = mask + {0,1,..,16/w, 0,1,..,16/w, ...}  */
      for (i = 0; i < 16; ++i)
	vec[i] = GEN_INT (i % e);
      vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
      vt = force_const_mem (V16QImode, vt);
      emit_insn (gen_addv16qi3 (mask, mask, vt));
    }
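  /* Worked example of the conversion above for V4SImode (e == 4):
     a word index k first becomes 4*k (the shift by log2(e)), the splat
     copies that low byte across its 4-byte group, and the final add of
     {0,1,2,3, 0,1,2,3, ...} yields byte indexes {4k, 4k+1, 4k+2, 4k+3}.  */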
  /* The actual shuffle operations all operate on V16QImode.  */
  op0 = gen_lowpart (V16QImode, op0);
  op1 = gen_lowpart (V16QImode, op1);
  target = gen_lowpart (V16QImode, target);

  if (TARGET_XOP)
    emit_insn (gen_xop_pperm (target, op0, op1, mask));
  else if (one_operand_shuffle)
    emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, mask));
  else
    {
      rtx xops[6];
      bool ok;

      /* Shuffle the two input vectors independently.  */
      t1 = gen_reg_rtx (V16QImode);
      t2 = gen_reg_rtx (V16QImode);
      emit_insn (gen_ssse3_pshufbv16qi3 (t1, op0, mask));
      emit_insn (gen_ssse3_pshufbv16qi3 (t2, op1, mask));

 merge_two:
      /* Then merge them together.  The key is whether any given control
	 element contained a bit set that indicates the second word.  */
      mask = operands[3];
      vt = GEN_INT (w);
      if (maskmode == V2DImode && !TARGET_SSE4_1)
	{
	  /* Without SSE4.1, we don't have V2DImode EQ.  Perform one
	     more shuffle to convert the V2DI input mask into a V4SI
	     input mask.  At that point the masking done by
	     expand_int_vcond will work as desired.  */
	  rtx t3 = gen_reg_rtx (V4SImode);
	  emit_insn (gen_sse2_pshufd_1 (t3, gen_lowpart (V4SImode, mask),
					const0_rtx, const0_rtx,
					const2_rtx, const2_rtx));
	  mask = t3;
	  maskmode = V4SImode;
	  e = w = 4;
	}

      for (i = 0; i < w; i++)
	vec[i] = vt;
      vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
      vt = force_reg (maskmode, vt);
      mask = expand_simple_binop (maskmode, AND, mask, vt,
				  NULL_RTX, 0, OPTAB_DIRECT);

      xops[0] = gen_lowpart (mode, operands[0]);
      xops[1] = gen_lowpart (mode, t2);
      xops[2] = gen_lowpart (mode, t1);
      xops[3] = gen_rtx_EQ (maskmode, mask, vt);
      xops[4] = mask;
      xops[5] = vt;
      ok = ix86_expand_int_vcond (xops);
      gcc_assert (ok);
    }
}
/* Unpack OP[1] into the next wider integer vector type.  UNSIGNED_P is
   true if we should do zero extension, else sign extension.  HIGH_P is
   true if we want the N/2 high elements, else the low elements.  */

void
ix86_expand_sse_unpack (rtx dest, rtx src, bool unsigned_p, bool high_p)
{
  enum machine_mode imode = GET_MODE (src);
  rtx tmp;

  if (TARGET_SSE4_1)
    {
      rtx (*unpack)(rtx, rtx);
      rtx (*extract)(rtx, rtx) = NULL;
      enum machine_mode halfmode = BLKmode;

      switch (imode)
	{
	case V32QImode:
	  if (unsigned_p)
	    unpack = gen_avx2_zero_extendv16qiv16hi2;
	  else
	    unpack = gen_avx2_sign_extendv16qiv16hi2;
	  halfmode = V16QImode;
	  extract
	    = high_p ? gen_vec_extract_hi_v32qi : gen_vec_extract_lo_v32qi;
	  break;
	case V16HImode:
	  if (unsigned_p)
	    unpack = gen_avx2_zero_extendv8hiv8si2;
	  else
	    unpack = gen_avx2_sign_extendv8hiv8si2;
	  halfmode = V8HImode;
	  extract
	    = high_p ? gen_vec_extract_hi_v16hi : gen_vec_extract_lo_v16hi;
	  break;
	case V8SImode:
	  if (unsigned_p)
	    unpack = gen_avx2_zero_extendv4siv4di2;
	  else
	    unpack = gen_avx2_sign_extendv4siv4di2;
	  halfmode = V4SImode;
	  extract
	    = high_p ? gen_vec_extract_hi_v8si : gen_vec_extract_lo_v8si;
	  break;
	case V16QImode:
	  if (unsigned_p)
	    unpack = gen_sse4_1_zero_extendv8qiv8hi2;
	  else
	    unpack = gen_sse4_1_sign_extendv8qiv8hi2;
	  break;
	case V8HImode:
	  if (unsigned_p)
	    unpack = gen_sse4_1_zero_extendv4hiv4si2;
	  else
	    unpack = gen_sse4_1_sign_extendv4hiv4si2;
	  break;
	case V4SImode:
	  if (unsigned_p)
	    unpack = gen_sse4_1_zero_extendv2siv2di2;
	  else
	    unpack = gen_sse4_1_sign_extendv2siv2di2;
	  break;
	default:
	  gcc_unreachable ();
	}

      if (GET_MODE_SIZE (imode) == 32)
	{
	  tmp = gen_reg_rtx (halfmode);
	  emit_insn (extract (tmp, src));
	}
      else if (high_p)
	{
	  /* Shift higher 8 bytes to lower 8 bytes.  */
	  tmp = gen_reg_rtx (imode);
	  emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, tmp),
					 gen_lowpart (V1TImode, src),
					 GEN_INT (64)));
	}
      else
	tmp = src;

      emit_insn (unpack (dest, tmp));
    }
  else
    {
      rtx (*unpack)(rtx, rtx, rtx);

      switch (imode)
	{
	case V16QImode:
	  if (high_p)
	    unpack = gen_vec_interleave_highv16qi;
	  else
	    unpack = gen_vec_interleave_lowv16qi;
	  break;
	case V8HImode:
	  if (high_p)
	    unpack = gen_vec_interleave_highv8hi;
	  else
	    unpack = gen_vec_interleave_lowv8hi;
	  break;
	case V4SImode:
	  if (high_p)
	    unpack = gen_vec_interleave_highv4si;
	  else
	    unpack = gen_vec_interleave_lowv4si;
	  break;
	default:
	  gcc_unreachable ();
	}

      if (unsigned_p)
	tmp = force_reg (imode, CONST0_RTX (imode));
      else
	tmp = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
				   src, pc_rtx, pc_rtx);

      emit_insn (unpack (gen_lowpart (imode, dest), src, tmp));
    }
}
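/* E.g. sign-extending the low half of a V8HImode SRC without SSE4.1:
   TMP is the per-element "0 > SRC" mask (all ones where SRC is
   negative), and the low interleave pairs each element with its sign
   mask, producing the V4SImode extension.  */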
/* Expand conditional increment or decrement using adc/sbb instructions.
   The default case using setcc followed by the conditional move can be
   done by generic code.  */
bool
ix86_expand_int_addcc (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx flags;
  rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
  rtx compare_op;
  rtx val = const0_rtx;
  bool fpcmp = false;
  enum machine_mode mode;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (operands[3] != const1_rtx
      && operands[3] != constm1_rtx)
    return false;
  if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
    return false;
  code = GET_CODE (compare_op);

  flags = XEXP (compare_op, 0);

  if (GET_MODE (flags) == CCFPmode
      || GET_MODE (flags) == CCFPUmode)
    {
      fpcmp = true;
      code = ix86_fp_compare_code_to_integer (code);
    }

  if (code != LTU)
    {
      val = constm1_rtx;
      if (fpcmp)
	PUT_CODE (compare_op,
		  reverse_condition_maybe_unordered
		    (GET_CODE (compare_op)));
      else
	PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
    }

  mode = GET_MODE (operands[0]);

  /* Construct either adc or sbb insn.  */
  if ((code == LTU) == (operands[3] == constm1_rtx))
    {
      switch (mode)
	{
	case QImode:
	  insn = gen_subqi3_carry;
	  break;
	case HImode:
	  insn = gen_subhi3_carry;
	  break;
	case SImode:
	  insn = gen_subsi3_carry;
	  break;
	case DImode:
	  insn = gen_subdi3_carry;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (mode)
	{
	case QImode:
	  insn = gen_addqi3_carry;
	  break;
	case HImode:
	  insn = gen_addhi3_carry;
	  break;
	case SImode:
	  insn = gen_addsi3_carry;
	  break;
	case DImode:
	  insn = gen_adddi3_carry;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  emit_insn (insn (operands[0], operands[2], val, flags, compare_op));

  return true;
}
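/* E.g. an unsigned "x = (a < b) ? x + 1 : x" becomes
     cmp a, b
     adc $0, x
   folding the carry of the compare directly into the addition.  */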
/* Split operands 0 and 1 into half-mode parts.  Similar to
   split_double_mode, but works for floating point parameters and
   nonoffsettable memories.  For pushes, it returns just stack offsets;
   the values will be saved in the right order.  At most four parts
   are generated.  */

static int
ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
{
  int size;

  if (!TARGET_64BIT)
    size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
  else
    size = (GET_MODE_SIZE (mode) + 4) / 8;

  gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
  gcc_assert (size >= 2 && size <= 4);

  /* Optimize constant pool reference to immediates.  This is used by fp
     moves, that force all constants to memory to allow combining.  */
  if (MEM_P (operand) && MEM_READONLY_P (operand))
    {
      rtx tmp = maybe_get_pool_constant (operand);
      if (tmp)
	operand = tmp;
    }

  if (MEM_P (operand) && !offsettable_memref_p (operand))
    {
      /* The only non-offsettable memories we handle are pushes.  */
      int ok = push_operand (operand, VOIDmode);

      gcc_assert (ok);

      operand = copy_rtx (operand);
      PUT_MODE (operand, word_mode);
      parts[0] = parts[1] = parts[2] = parts[3] = operand;
      return size;
    }

  if (GET_CODE (operand) == CONST_VECTOR)
    {
      enum machine_mode imode = int_mode_for_mode (mode);
      /* Caution: if we looked through a constant pool memory above,
	 the operand may actually have a different mode now.  That's
	 ok, since we want to pun this all the way back to an integer.  */
      operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
      gcc_assert (operand != NULL);
      mode = imode;
    }

  if (!TARGET_64BIT)
    {
      if (mode == DImode)
	split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
      else
	{
	  int i;

	  if (REG_P (operand))
	    {
	      gcc_assert (reload_completed);
	      for (i = 0; i < size; i++)
		parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
	    }
	  else if (offsettable_memref_p (operand))
	    {
	      operand = adjust_address (operand, SImode, 0);
	      parts[0] = operand;
	      for (i = 1; i < size; i++)
		parts[i] = adjust_address (operand, SImode, 4 * i);
	    }
	  else if (GET_CODE (operand) == CONST_DOUBLE)
	    {
	      REAL_VALUE_TYPE r;
	      long l[4];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
	      switch (mode)
		{
		case TFmode:
		  real_to_target (l, &r, mode);
		  parts[3] = gen_int_mode (l[3], SImode);
		  parts[2] = gen_int_mode (l[2], SImode);
		  break;
		case XFmode:
		  /* We can't use REAL_VALUE_TO_TARGET_LONG_DOUBLE since
		     long double may not be 80-bit.  */
		  real_to_target (l, &r, mode);
		  parts[2] = gen_int_mode (l[2], SImode);
		  break;
		case DFmode:
		  REAL_VALUE_TO_TARGET_DOUBLE (r, l);
		  break;
		default:
		  gcc_unreachable ();
		}
	      parts[1] = gen_int_mode (l[1], SImode);
	      parts[0] = gen_int_mode (l[0], SImode);
	    }
	  else
	    gcc_unreachable ();
	}
    }
  else
    {
      if (mode == TImode)
	split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
      if (mode == XFmode || mode == TFmode)
	{
	  enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
	  if (REG_P (operand))
	    {
	      gcc_assert (reload_completed);
	      parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
	      parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
	    }
	  else if (offsettable_memref_p (operand))
	    {
	      operand = adjust_address (operand, DImode, 0);
	      parts[0] = operand;
	      parts[1] = adjust_address (operand, upper_mode, 8);
	    }
	  else if (GET_CODE (operand) == CONST_DOUBLE)
	    {
	      REAL_VALUE_TYPE r;
	      long l[4];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
	      real_to_target (l, &r, mode);

	      /* Do not use shift by 32 to avoid warning on 32bit systems.  */
	      if (HOST_BITS_PER_WIDE_INT >= 64)
		parts[0]
		  = gen_int_mode
		      ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
		       + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
		       DImode);
	      else
		parts[0] = immed_double_const (l[0], l[1], DImode);

	      if (upper_mode == SImode)
		parts[1] = gen_int_mode (l[2], SImode);
	      else if (HOST_BITS_PER_WIDE_INT >= 64)
		parts[1]
		  = gen_int_mode
		      ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
		       + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
		       DImode);
	      else
		parts[1] = immed_double_const (l[2], l[3], DImode);
	    }
	  else
	    gcc_unreachable ();
	}
    }

  return size;
}
/* Emit insns to perform a move or push of DI, DF, XF, and TF values.
   Operands 2-5 receive the destination parts in the correct order;
   operands 6-9 receive the source parts.  */

void
ix86_split_long_move (rtx operands[])
{
  rtx part[2][4];
  int nparts, i, j;
  int push = 0;
  int collisions = 0;
  enum machine_mode mode = GET_MODE (operands[0]);
  bool collisionparts[4];

  /* The DFmode expanders may ask us to move double.
     For 64bit target this is single move.  By hiding the fact
     here we simplify i386.md splitters.  */
  if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
    {
      /* Optimize constant pool reference to immediates.  This is used by
	 fp moves, that force all constants to memory to allow combining.  */
      if (MEM_P (operands[1])
	  && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
	operands[1] = get_pool_constant (XEXP (operands[1], 0));
      if (push_operand (operands[0], VOIDmode))
	{
	  operands[0] = copy_rtx (operands[0]);
	  PUT_MODE (operands[0], word_mode);
	}
      else
	operands[0] = gen_lowpart (DImode, operands[0]);
      operands[1] = gen_lowpart (DImode, operands[1]);
      emit_move_insn (operands[0], operands[1]);
      return;
    }

  /* The only non-offsettable memory we handle is push.  */
  if (push_operand (operands[0], VOIDmode))
    push = 1;
  else
    gcc_assert (!MEM_P (operands[0])
		|| offsettable_memref_p (operands[0]));

  nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
  ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));

  /* When emitting push, take care for source operands on the stack.  */
  if (push && MEM_P (operands[1])
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    {
      rtx src_base = XEXP (part[1][nparts - 1], 0);

      /* Compensate for the stack decrement by 4.  */
      if (!TARGET_64BIT && nparts == 3
	  && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
	src_base = plus_constant (Pmode, src_base, 4);

      /* src_base refers to the stack pointer and is
	 automatically decreased by emitted push.  */
      for (i = 0; i < nparts; i++)
	part[1][i] = change_address (part[1][i],
				     GET_MODE (part[1][i]), src_base);
    }

  /* We need to do the copy in the right order in case an address register
     of the source overlaps the destination.  */
  if (REG_P (part[0][0]) && MEM_P (part[1][0]))
    {
      rtx tmp;

      for (i = 0; i < nparts; i++)
	{
	  collisionparts[i]
	    = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
	  if (collisionparts[i])
	    collisions++;
	}

      /* Collision in the middle part can be handled by reordering.  */
      if (collisions == 1 && nparts == 3 && collisionparts[1])
	{
	  tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
	  tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
	}
      else if (collisions == 1
	       && nparts == 4
	       && (collisionparts[1] || collisionparts[2]))
	{
	  if (collisionparts[1])
	    {
	      tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
	      tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
	    }
	  else
	    {
	      tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
	      tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
	    }
	}

      /* If there are more collisions, we can't handle it by reordering.
	 Do an lea to the last part and use only one colliding move.  */
      else if (collisions > 1)
	{
	  rtx base;

	  collisions = 1;

	  base = part[0][nparts - 1];

	  /* Handle the case when the last part isn't valid for lea.
	     Happens in 64-bit mode storing the 12-byte XFmode.  */
	  if (GET_MODE (base) != Pmode)
	    base = gen_rtx_REG (Pmode, REGNO (base));

	  emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
	  part[1][0] = replace_equiv_address (part[1][0], base);
	  for (i = 1; i < nparts; i++)
	    {
	      tmp = plus_constant (Pmode, base, UNITS_PER_WORD * i);
	      part[1][i] = replace_equiv_address (part[1][i], tmp);
	    }
	}
    }

  if (push)
    {
      if (!TARGET_64BIT)
	{
	  if (nparts == 3)
	    {
	      if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
		emit_insn (ix86_gen_add3 (stack_pointer_rtx,
					  stack_pointer_rtx, GEN_INT (-4)));
	      emit_move_insn (part[0][2], part[1][2]);
	    }
	  else if (nparts == 4)
	    {
	      emit_move_insn (part[0][3], part[1][3]);
	      emit_move_insn (part[0][2], part[1][2]);
	    }
	}
      else
	{
	  /* In 64bit mode we don't have a 32bit push available.  In case
	     this is a register, it is OK - we will just use the larger
	     counterpart.  We also retype memory - these come from the
	     attempt to avoid a REX prefix on moving the second half of a
	     TFmode value.  */
	  if (GET_MODE (part[1][1]) == SImode)
	    {
	      switch (GET_CODE (part[1][1]))
		{
		case MEM:
		  part[1][1] = adjust_address (part[1][1], DImode, 0);
		  break;

		case REG:
		  part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
		  break;

		default:
		  gcc_unreachable ();
		}

	      if (GET_MODE (part[1][0]) == SImode)
		part[1][0] = part[1][1];
	    }
	}
      emit_move_insn (part[0][1], part[1][1]);
      emit_move_insn (part[0][0], part[1][0]);
      return;
    }

  /* Choose correct order to not overwrite the source before it is copied.  */
  if ((REG_P (part[0][0])
       && REG_P (part[1][1])
       && (REGNO (part[0][0]) == REGNO (part[1][1])
	   || (nparts == 3
	       && REGNO (part[0][0]) == REGNO (part[1][2]))
	   || (nparts == 4
	       && REGNO (part[0][0]) == REGNO (part[1][3]))))
      || (collisions > 0
	  && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
    {
      for (i = 0, j = nparts - 1; i < nparts; i++, j--)
	{
	  operands[2 + i] = part[0][j];
	  operands[6 + i] = part[1][j];
	}
    }
  else
    {
      for (i = 0; i < nparts; i++)
	{
	  operands[2 + i] = part[0][i];
	  operands[6 + i] = part[1][i];
	}
    }

  /* If optimizing for size, attempt to locally unCSE nonzero constants.  */
  if (optimize_insn_for_size_p ())
    {
      for (j = 0; j < nparts - 1; j++)
	if (CONST_INT_P (operands[6 + j])
	    && operands[6 + j] != const0_rtx
	    && REG_P (operands[2 + j]))
	  for (i = j; i < nparts - 1; i++)
	    if (CONST_INT_P (operands[7 + i])
		&& INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
	      operands[7 + i] = operands[2 + j];
    }

  for (i = 0; i < nparts; i++)
    emit_move_insn (operands[2 + i], operands[6 + i]);
}
/* Helper function of ix86_split_ashl used to generate an SImode/DImode
   left shift by a constant, either using a single shift or
   a sequence of add instructions.  */

static void
ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
{
  rtx (*insn)(rtx, rtx, rtx);

  if (count == 1
      || (count * ix86_cost->add <= ix86_cost->shift_const
	  && !optimize_insn_for_size_p ()))
    {
      insn = mode == DImode ? gen_addsi3 : gen_adddi3;
      while (count-- > 0)
	emit_insn (insn (operand, operand, operand));
    }
  else
    {
      insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
      emit_insn (insn (operand, operand, GEN_INT (count)));
    }
}
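/* E.g. when adds are cheap enough, shifting a half-word left by 2
   expands to "add r,r; add r,r": each add doubles the value, so N adds
   shift by N.  */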
void
ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
{
  rtx (*gen_ashl3)(rtx, rtx, rtx);
  rtx (*gen_shld)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;

  rtx low[2], high[2];
  int count;

  if (CONST_INT_P (operands[2]))
    {
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count >= half_width)
	{
	  emit_move_insn (high[0], low[1]);
	  emit_move_insn (low[0], const0_rtx);

	  if (count > half_width)
	    ix86_expand_ashl_const (high[0], count - half_width, mode);
	}
      else
	{
	  gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;

	  if (!rtx_equal_p (operands[0], operands[1]))
	    emit_move_insn (operands[0], operands[1]);

	  emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
	  ix86_expand_ashl_const (low[0], count, mode);
	}
      return;
    }

  split_double_mode (mode, operands, 1, low, high);

  gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;

  if (operands[1] == const1_rtx)
    {
      /* Assuming we've chosen QImode capable registers, then 1 << N
	 can be done with two 32/64-bit shifts, no branches, no cmoves.  */
      if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
	{
	  rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);

	  ix86_expand_clear (low[0]);
	  ix86_expand_clear (high[0]);
	  emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));

	  d = gen_lowpart (QImode, low[0]);
	  d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
	  s = gen_rtx_EQ (QImode, flags, const0_rtx);
	  emit_insn (gen_rtx_SET (VOIDmode, d, s));

	  d = gen_lowpart (QImode, high[0]);
	  d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
	  s = gen_rtx_NE (QImode, flags, const0_rtx);
	  emit_insn (gen_rtx_SET (VOIDmode, d, s));
	}

      /* Otherwise, we can get the same results by manually performing
	 a bit extract operation on bit 5/6, and then performing the two
	 shifts.  The two methods of getting 0/1 into low/high are exactly
	 the same size.  Avoiding the shift in the bit extract case helps
	 pentium4 a bit; no one else seems to care much either way.  */
      else
	{
	  enum machine_mode half_mode;
	  rtx (*gen_lshr3)(rtx, rtx, rtx);
	  rtx (*gen_and3)(rtx, rtx, rtx);
	  rtx (*gen_xor3)(rtx, rtx, rtx);
	  HOST_WIDE_INT bits;
	  rtx x;

	  if (mode == DImode)
	    {
	      half_mode = SImode;
	      gen_lshr3 = gen_lshrsi3;
	      gen_and3 = gen_andsi3;
	      gen_xor3 = gen_xorsi3;
	      bits = 5;
	    }
	  else
	    {
	      half_mode = DImode;
	      gen_lshr3 = gen_lshrdi3;
	      gen_and3 = gen_anddi3;
	      gen_xor3 = gen_xordi3;
	      bits = 6;
	    }

	  if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
	    x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
	  else
	    x = gen_lowpart (half_mode, operands[2]);
	  emit_insn (gen_rtx_SET (VOIDmode, high[0], x));

	  emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
	  emit_insn (gen_and3 (high[0], high[0], const1_rtx));
	  emit_move_insn (low[0], high[0]);
	  emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
	}

      emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
      emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
      return;
    }
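  /* E.g. for a DImode "1 << n" on ia32: bit 5 of the count picks which
     half receives the 1 (the other half is 0), and both halves are then
     shifted by n, of which the hardware uses only the low 5 bits.  */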
  if (operands[1] == constm1_rtx)
    {
      /* For -1 << N, we can avoid the shld instruction, because we
	 know that we're shifting 0...31/63 ones into a -1.  */
      emit_move_insn (low[0], constm1_rtx);
      if (optimize_insn_for_size_p ())
	emit_move_insn (high[0], low[0]);
      else
	emit_move_insn (high[0], constm1_rtx);
    }
  else
    {
      gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;

      if (!rtx_equal_p (operands[0], operands[1]))
	emit_move_insn (operands[0], operands[1]);

      split_double_mode (mode, operands, 1, low, high);
      emit_insn (gen_shld (high[0], low[0], operands[2]));
    }

  emit_insn (gen_ashl3 (low[0], low[0], operands[2]));

  if (TARGET_CMOVE && scratch)
    {
      rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
	= mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

      ix86_expand_clear (scratch);
      emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2],
				      scratch));
    }
  else
    {
      rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
	= mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;

      emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
    }
}
void
ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
{
  rtx (*gen_ashr3)(rtx, rtx, rtx)
    = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
  rtx (*gen_shrd)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;

  rtx low[2], high[2];
  int count;

  if (CONST_INT_P (operands[2]))
    {
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count == GET_MODE_BITSIZE (mode) - 1)
	{
	  emit_move_insn (high[0], high[1]);
	  emit_insn (gen_ashr3 (high[0], high[0],
				GEN_INT (half_width - 1)));
	  emit_move_insn (low[0], high[0]);
	}
21504 else if (count
>= half_width
)
21506 emit_move_insn (low
[0], high
[1]);
21507 emit_move_insn (high
[0], low
[0]);
21508 emit_insn (gen_ashr3 (high
[0], high
[0],
21509 GEN_INT (half_width
- 1)));
21511 if (count
> half_width
)
21512 emit_insn (gen_ashr3 (low
[0], low
[0],
21513 GEN_INT (count
- half_width
)));
21517 gen_shrd
= mode
== DImode
? gen_x86_shrd
: gen_x86_64_shrd
;
21519 if (!rtx_equal_p (operands
[0], operands
[1]))
21520 emit_move_insn (operands
[0], operands
[1]);
21522 emit_insn (gen_shrd (low
[0], high
[0], GEN_INT (count
)));
21523 emit_insn (gen_ashr3 (high
[0], high
[0], GEN_INT (count
)));
21528 gen_shrd
= mode
== DImode
? gen_x86_shrd
: gen_x86_64_shrd
;
21530 if (!rtx_equal_p (operands
[0], operands
[1]))
21531 emit_move_insn (operands
[0], operands
[1]);
21533 split_double_mode (mode
, operands
, 1, low
, high
);
21535 emit_insn (gen_shrd (low
[0], high
[0], operands
[2]));
21536 emit_insn (gen_ashr3 (high
[0], high
[0], operands
[2]));
21538 if (TARGET_CMOVE
&& scratch
)
21540 rtx (*gen_x86_shift_adj_1
)(rtx
, rtx
, rtx
, rtx
)
21541 = mode
== DImode
? gen_x86_shiftsi_adj_1
: gen_x86_shiftdi_adj_1
;
21543 emit_move_insn (scratch
, high
[0]);
21544 emit_insn (gen_ashr3 (scratch
, scratch
,
21545 GEN_INT (half_width
- 1)));
21546 emit_insn (gen_x86_shift_adj_1 (low
[0], high
[0], operands
[2],
21551 rtx (*gen_x86_shift_adj_3
)(rtx
, rtx
, rtx
)
21552 = mode
== DImode
? gen_x86_shiftsi_adj_3
: gen_x86_shiftdi_adj_3
;
21554 emit_insn (gen_x86_shift_adj_3 (low
[0], high
[0], operands
[2]));
/* Split a double-word logical shift right; same structure as
   ix86_split_ashr above, but the upper half is zeroed.  */

void
ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
{
  rtx (*gen_lshr3)(rtx, rtx, rtx)
    = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
  rtx (*gen_shrd)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;

  rtx low[2], high[2];
  int count;

  if (CONST_INT_P (operands[2]))
    {
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count >= half_width)
	{
	  emit_move_insn (low[0], high[1]);
	  ix86_expand_clear (high[0]);

	  if (count > half_width)
	    emit_insn (gen_lshr3 (low[0], low[0],
				  GEN_INT (count - half_width)));
	}
      else
	{
	  gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

	  if (!rtx_equal_p (operands[0], operands[1]))
	    emit_move_insn (operands[0], operands[1]);

	  emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
	  emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
	}
    }
  else
    {
      gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

      if (!rtx_equal_p (operands[0], operands[1]))
	emit_move_insn (operands[0], operands[1]);

      split_double_mode (mode, operands, 1, low, high);

      emit_insn (gen_shrd (low[0], high[0], operands[2]));
      emit_insn (gen_lshr3 (high[0], high[0], operands[2]));

      if (TARGET_CMOVE && scratch)
	{
	  rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
	    = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

	  ix86_expand_clear (scratch);
	  emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
					  scratch));
	}
      else
	{
	  rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
	    = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;

	  emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
	}
    }
}
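/* For illustration: on a 32-bit target, a DImode logical shift right by a
   constant COUNT >= 32 as handled above is just

	movl	high, low
	xorl	high, high
	shrl	$COUNT-32, low	; only when COUNT > 32

   i.e. two or three insns with no shrd and no branch.  (Illustrative
   pseudo-assembly; the actual registers depend on allocation.)  */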
/* Predict just emitted jump instruction to be taken with probability PROB.  */
static void
predict_jump (int prob)
{
  rtx insn = get_last_insn ();
  gcc_assert (JUMP_P (insn));
  add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
}
/* Helper function for the string operations below.  Test whether VARIABLE
   is aligned to VALUE bytes.  If so, jump to the label we return.  */
static rtx
ix86_expand_aligntest (rtx variable, int value, bool epilogue)
{
  rtx label = gen_label_rtx ();
  rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
  if (GET_MODE (variable) == DImode)
    emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
  else
    emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
  emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
			   1, label);
  if (epilogue)
    predict_jump (REG_BR_PROB_BASE * 50 / 100);
  else
    predict_jump (REG_BR_PROB_BASE * 90 / 100);
  return label;
}
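/* E.g. ix86_expand_aligntest (destptr, 2, false) masks DESTPTR with 2 and
   jumps past the fix-up code when that bit is already clear; the prologues
   below use this on pointers and the epilogues on residual counts.  */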
/* Adjust COUNTER by the VALUE.  */
static void
ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
{
  rtx (*gen_add)(rtx, rtx, rtx)
    = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;

  emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
}
/* Zero extend possibly SImode EXP to Pmode register.  */
rtx
ix86_zero_extend_to_Pmode (rtx exp)
{
  return force_reg (Pmode, convert_to_mode (Pmode, exp, 1));
}
/* Divide COUNTREG by SCALE.  */
static rtx
scale_counter (rtx countreg, int scale)
{
  rtx sc;

  if (scale == 1)
    return countreg;
  if (CONST_INT_P (countreg))
    return GEN_INT (INTVAL (countreg) / scale);
  gcc_assert (REG_P (countreg));

  sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
			    GEN_INT (exact_log2 (scale)),
			    NULL, 1, OPTAB_DIRECT);
  return sc;
}
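/* For example, scale_counter (GEN_INT (64), 4) folds to the constant 16,
   while a register count is shifted right by exact_log2 (4) == 2 at run
   time; SCALE is always a power of two here.  */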
/* Return mode for the memcpy/memset loop counter.  Prefer SImode over
   DImode for constant loop counts.  */
static enum machine_mode
counter_mode (rtx count_exp)
{
  if (GET_MODE (count_exp) != VOIDmode)
    return GET_MODE (count_exp);
  if (!CONST_INT_P (count_exp))
    abort ();
  if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
    return DImode;
  return SImode;
}
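/* E.g. a constant count of 1 << 32 forces DImode on 64-bit targets, while
   any count fitting in 32 bits keeps the cheaper SImode counter.  */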
/* When SRCPTR is non-NULL, output a simple loop to move memory pointed to
   by SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times; the
   overall size is COUNT specified in bytes.  When SRCPTR is NULL, output
   the equivalent loop to set memory by VALUE (supposed to be in MODE).

   The size is rounded down to a whole number of the chunk size moved at
   once.  SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing
   info.  */

static void
expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
			       rtx destptr, rtx srcptr, rtx value,
			       rtx count, enum machine_mode mode, int unroll,
			       int expected_size)
{
  rtx out_label, top_label, iter, tmp;
  enum machine_mode iter_mode = counter_mode (count);
  rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
  rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
  rtx size;
  rtx x_addr;
  rtx y_addr;
  int i;

  top_label = gen_label_rtx ();
  out_label = gen_label_rtx ();
  iter = gen_reg_rtx (iter_mode);

  size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
			      NULL, 1, OPTAB_DIRECT);
  /* Those two should combine.  */
  if (piece_size == const1_rtx)
    {
      emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
			       true, out_label);
      predict_jump (REG_BR_PROB_BASE * 10 / 100);
    }
  emit_move_insn (iter, const0_rtx);

  emit_label (top_label);

  tmp = convert_modes (Pmode, iter_mode, iter, true);
  x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
  destmem = change_address (destmem, mode, x_addr);

  if (srcmem)
    {
      y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
      srcmem = change_address (srcmem, mode, y_addr);

      /* When unrolling for chips that reorder memory reads and writes,
	 we can save registers by using a single temporary.
	 Also using 4 temporaries is overkill in 32bit mode.  */
      if (!TARGET_64BIT && 0)
	{
	  for (i = 0; i < unroll; i++)
	    {
	      if (i)
		{
		  destmem =
		    adjust_address (copy_rtx (destmem), mode,
				    GET_MODE_SIZE (mode));
		  srcmem =
		    adjust_address (copy_rtx (srcmem), mode,
				    GET_MODE_SIZE (mode));
		}
	      emit_move_insn (destmem, srcmem);
	    }
	}
      else
	{
	  rtx tmpreg[4];
	  gcc_assert (unroll <= 4);
	  for (i = 0; i < unroll; i++)
	    {
	      tmpreg[i] = gen_reg_rtx (mode);
	      if (i)
		srcmem =
		  adjust_address (copy_rtx (srcmem), mode,
				  GET_MODE_SIZE (mode));
	      emit_move_insn (tmpreg[i], srcmem);
	    }
	  for (i = 0; i < unroll; i++)
	    {
	      if (i)
		destmem =
		  adjust_address (copy_rtx (destmem), mode,
				  GET_MODE_SIZE (mode));
	      emit_move_insn (destmem, tmpreg[i]);
	    }
	}
    }
  else
    for (i = 0; i < unroll; i++)
      {
	if (i)
	  destmem =
	    adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
	emit_move_insn (destmem, value);
      }

  tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
			     true, OPTAB_LIB_WIDEN);
  if (tmp != iter)
    emit_move_insn (iter, tmp);

  emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
			   true, top_label);
  if (expected_size != -1)
    {
      expected_size /= GET_MODE_SIZE (mode) * unroll;
      if (expected_size == 0)
	predict_jump (0);
      else if (expected_size > REG_BR_PROB_BASE)
	predict_jump (REG_BR_PROB_BASE - 1);
      else
	predict_jump (REG_BR_PROB_BASE
		      - (REG_BR_PROB_BASE + expected_size / 2)
			/ expected_size);
    }
  else
    predict_jump (REG_BR_PROB_BASE * 80 / 100);
  iter = ix86_zero_extend_to_Pmode (iter);
  tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
			     true, OPTAB_LIB_WIDEN);
  if (tmp != destptr)
    emit_move_insn (destptr, tmp);
  if (srcptr)
    {
      tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
				 true, OPTAB_LIB_WIDEN);
      if (tmp != srcptr)
	emit_move_insn (srcptr, tmp);
    }
  emit_label (out_label);
}
/* Output "rep; mov" instruction.
   Arguments have the same meaning as for the previous function.  */
static void
expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
			   rtx destptr, rtx srcptr,
			   rtx count,
			   enum machine_mode mode)
{
  rtx destexp;
  rtx srcexp;
  rtx countreg;
  HOST_WIDE_INT rounded_count;

  /* If the size is known, it is shorter to use rep movs.  */
  if (mode == QImode && CONST_INT_P (count)
      && !(INTVAL (count) & 3))
    mode = SImode;

  if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
    destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
  if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
    srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
  countreg = ix86_zero_extend_to_Pmode (scale_counter (count,
						       GET_MODE_SIZE (mode)));
  if (mode != QImode)
    {
      destexp = gen_rtx_ASHIFT (Pmode, countreg,
				GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
      srcexp = gen_rtx_ASHIFT (Pmode, countreg,
			       GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
    }
  else
    {
      destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
      srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
    }
  if (CONST_INT_P (count))
    {
      rounded_count = (INTVAL (count)
		       & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
      destmem = shallow_copy_rtx (destmem);
      srcmem = shallow_copy_rtx (srcmem);
      set_mem_size (destmem, rounded_count);
      set_mem_size (srcmem, rounded_count);
    }
  else
    {
      if (MEM_SIZE_KNOWN_P (destmem))
	clear_mem_size (destmem);
      if (MEM_SIZE_KNOWN_P (srcmem))
	clear_mem_size (srcmem);
    }
  emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
			  destexp, srcexp));
}
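/* E.g. for SImode chunks DESTEXP built above is destptr + (countreg << 2),
   the final value of the destination pointer; a known count of 64 bytes
   puts 16 into COUNTREG and the pattern emits a single "rep movsl"
   ("rep movsd" in Intel syntax).  Illustrative only; see the rep_mov
   pattern in i386.md.  */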
/* Output "rep; stos" instruction.
   Arguments have the same meaning as for the previous function.  */
static void
expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
			    rtx count, enum machine_mode mode,
			    rtx orig_value)
{
  rtx destexp;
  rtx countreg;
  HOST_WIDE_INT rounded_count;

  if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
    destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
  value = force_reg (mode, gen_lowpart (mode, value));
  countreg = ix86_zero_extend_to_Pmode (scale_counter (count,
						       GET_MODE_SIZE (mode)));
  if (mode != QImode)
    {
      destexp = gen_rtx_ASHIFT (Pmode, countreg,
				GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
    }
  else
    destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
  if (orig_value == const0_rtx && CONST_INT_P (count))
    {
      rounded_count = (INTVAL (count)
		       & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
      destmem = shallow_copy_rtx (destmem);
      set_mem_size (destmem, rounded_count);
    }
  else if (MEM_SIZE_KNOWN_P (destmem))
    clear_mem_size (destmem);
  emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
}
/* Emit a single string move of MODE at OFFSET from SRCPTR/DESTPTR,
   updating both pointers.  */
static void
emit_strmov (rtx destmem, rtx srcmem,
	     rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
{
  rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
  rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
  emit_insn (gen_strmov (destptr, dest, srcptr, src));
}
/* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST.  */
static void
expand_movmem_epilogue (rtx destmem, rtx srcmem,
			rtx destptr, rtx srcptr, rtx count, int max_size)
{
  rtx src, dest;
  if (CONST_INT_P (count))
    {
      HOST_WIDE_INT countval = INTVAL (count);
      int offset = 0;

      if ((countval & 0x10) && max_size > 16)
	{
	  if (TARGET_64BIT)
	    {
	      emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
	      emit_strmov (destmem, srcmem, destptr, srcptr, DImode,
			   offset + 8);
	    }
	  else
	    gcc_unreachable ();
	  offset += 16;
	}
      if ((countval & 0x08) && max_size > 8)
	{
	  if (TARGET_64BIT)
	    emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
	  else
	    {
	      emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
	      emit_strmov (destmem, srcmem, destptr, srcptr, SImode,
			   offset + 4);
	    }
	  offset += 8;
	}
      if ((countval & 0x04) && max_size > 4)
	{
	  emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
	  offset += 4;
	}
      if ((countval & 0x02) && max_size > 2)
	{
	  emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
	  offset += 2;
	}
      if ((countval & 0x01) && max_size > 1)
	{
	  emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
	  offset += 1;
	}
      return;
    }
  if (max_size > 8)
    {
      count = expand_simple_binop (GET_MODE (count), AND, count,
				   GEN_INT (max_size - 1),
				   count, 1, OPTAB_DIRECT);
      expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
				     count, QImode, 1, 4);
      return;
    }

  /* When there are stringops, we can cheaply increase dest and src pointers.
     Otherwise we save code size by maintaining offset (zero is readily
     available from preceding rep operation) and using x86 addressing modes.
   */
  if (TARGET_SINGLE_STRINGOP)
    {
      if (max_size > 4)
	{
	  rtx label = ix86_expand_aligntest (count, 4, true);
	  src = change_address (srcmem, SImode, srcptr);
	  dest = change_address (destmem, SImode, destptr);
	  emit_insn (gen_strmov (destptr, dest, srcptr, src));
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 2)
	{
	  rtx label = ix86_expand_aligntest (count, 2, true);
	  src = change_address (srcmem, HImode, srcptr);
	  dest = change_address (destmem, HImode, destptr);
	  emit_insn (gen_strmov (destptr, dest, srcptr, src));
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 1)
	{
	  rtx label = ix86_expand_aligntest (count, 1, true);
	  src = change_address (srcmem, QImode, srcptr);
	  dest = change_address (destmem, QImode, destptr);
	  emit_insn (gen_strmov (destptr, dest, srcptr, src));
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
    }
  else
    {
      rtx offset = force_reg (Pmode, const0_rtx);
      rtx tmp;

      if (max_size > 4)
	{
	  rtx label = ix86_expand_aligntest (count, 4, true);
	  src = change_address (srcmem, SImode, srcptr);
	  dest = change_address (destmem, SImode, destptr);
	  emit_move_insn (dest, src);
	  tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
				     true, OPTAB_LIB_WIDEN);
	  if (tmp != offset)
	    emit_move_insn (offset, tmp);
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 2)
	{
	  rtx label = ix86_expand_aligntest (count, 2, true);
	  tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
	  src = change_address (srcmem, HImode, tmp);
	  tmp = gen_rtx_PLUS (Pmode, destptr, offset);
	  dest = change_address (destmem, HImode, tmp);
	  emit_move_insn (dest, src);
	  tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
				     true, OPTAB_LIB_WIDEN);
	  if (tmp != offset)
	    emit_move_insn (offset, tmp);
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 1)
	{
	  rtx label = ix86_expand_aligntest (count, 1, true);
	  tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
	  src = change_address (srcmem, QImode, tmp);
	  tmp = gen_rtx_PLUS (Pmode, destptr, offset);
	  dest = change_address (destmem, QImode, tmp);
	  emit_move_insn (dest, src);
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
    }
}
/* Output code to set at most count & (max_size - 1) bytes starting by DEST.  */
static void
expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
				 rtx count, int max_size)
{
  count =
    expand_simple_binop (counter_mode (count), AND, count,
			 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
  expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
				 gen_lowpart (QImode, value), count, QImode,
				 1, max_size / 2);
}
/* Output code to set at most count & (max_size - 1) bytes starting by DEST.  */
static void
expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count,
			int max_size)
{
  rtx dest;

  if (CONST_INT_P (count))
    {
      HOST_WIDE_INT countval = INTVAL (count);
      int offset = 0;

      if ((countval & 0x10) && max_size > 16)
	{
	  if (TARGET_64BIT)
	    {
	      dest = adjust_automodify_address_nv (destmem, DImode, destptr,
						   offset);
	      emit_insn (gen_strset (destptr, dest, value));
	      dest = adjust_automodify_address_nv (destmem, DImode, destptr,
						   offset + 8);
	      emit_insn (gen_strset (destptr, dest, value));
	    }
	  else
	    gcc_unreachable ();
	  offset += 16;
	}
      if ((countval & 0x08) && max_size > 8)
	{
	  if (TARGET_64BIT)
	    {
	      dest = adjust_automodify_address_nv (destmem, DImode, destptr,
						   offset);
	      emit_insn (gen_strset (destptr, dest, value));
	    }
	  else
	    {
	      dest = adjust_automodify_address_nv (destmem, SImode, destptr,
						   offset);
	      emit_insn (gen_strset (destptr, dest, value));
	      dest = adjust_automodify_address_nv (destmem, SImode, destptr,
						   offset + 4);
	      emit_insn (gen_strset (destptr, dest, value));
	    }
	  offset += 8;
	}
      if ((countval & 0x04) && max_size > 4)
	{
	  dest = adjust_automodify_address_nv (destmem, SImode, destptr,
					       offset);
	  emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
	  offset += 4;
	}
      if ((countval & 0x02) && max_size > 2)
	{
	  dest = adjust_automodify_address_nv (destmem, HImode, destptr,
					       offset);
	  emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
	  offset += 2;
	}
      if ((countval & 0x01) && max_size > 1)
	{
	  dest = adjust_automodify_address_nv (destmem, QImode, destptr,
					       offset);
	  emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
	  offset += 1;
	}
      return;
    }
  if (max_size > 32)
    {
      expand_setmem_epilogue_via_loop (destmem, destptr, value, count,
				       max_size);
      return;
    }
  if (max_size > 16)
    {
      rtx label = ix86_expand_aligntest (count, 16, true);
      if (TARGET_64BIT)
	{
	  dest = change_address (destmem, DImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	}
      else
	{
	  dest = change_address (destmem, SImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 8)
    {
      rtx label = ix86_expand_aligntest (count, 8, true);
      if (TARGET_64BIT)
	{
	  dest = change_address (destmem, DImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	}
      else
	{
	  dest = change_address (destmem, SImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 4)
    {
      rtx label = ix86_expand_aligntest (count, 4, true);
      dest = change_address (destmem, SImode, destptr);
      emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 2)
    {
      rtx label = ix86_expand_aligntest (count, 2, true);
      dest = change_address (destmem, HImode, destptr);
      emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 1)
    {
      rtx label = ix86_expand_aligntest (count, 1, true);
      dest = change_address (destmem, QImode, destptr);
      emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
}
/* Copy enough bytes from SRC to DEST to align DEST, known to be aligned
   to ALIGN bytes, to DESIRED_ALIGNMENT.  */
static void
expand_movmem_prologue (rtx destmem, rtx srcmem,
			rtx destptr, rtx srcptr, rtx count,
			int align, int desired_alignment)
{
  if (align <= 1 && desired_alignment > 1)
    {
      rtx label = ix86_expand_aligntest (destptr, 1, false);
      srcmem = change_address (srcmem, QImode, srcptr);
      destmem = change_address (destmem, QImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 1);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 2 && desired_alignment > 2)
    {
      rtx label = ix86_expand_aligntest (destptr, 2, false);
      srcmem = change_address (srcmem, HImode, srcptr);
      destmem = change_address (destmem, HImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 2);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 4 && desired_alignment > 4)
    {
      rtx label = ix86_expand_aligntest (destptr, 4, false);
      srcmem = change_address (srcmem, SImode, srcptr);
      destmem = change_address (destmem, SImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 4);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  gcc_assert (desired_alignment <= 8);
}
/* Copy enough bytes from SRC to DST to align DST to DESIRED_ALIGN.
   ALIGN_BYTES is how many bytes need to be copied.  */
static rtx
expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
				 int desired_align, int align_bytes)
{
  rtx src = *srcp;
  rtx orig_dst = dst;
  rtx orig_src = src;
  int off = 0;
  int src_align_bytes
    = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
  if (src_align_bytes >= 0)
    src_align_bytes = desired_align - src_align_bytes;
  if (align_bytes & 1)
    {
      dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
      src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
      off = 1;
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
    }
  if (align_bytes & 2)
    {
      dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
      src = adjust_automodify_address_nv (src, HImode, srcreg, off);
      if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
	set_mem_align (dst, 2 * BITS_PER_UNIT);
      if (src_align_bytes >= 0
	  && (src_align_bytes & 1) == (align_bytes & 1)
	  && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
	set_mem_align (src, 2 * BITS_PER_UNIT);
      off = 2;
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
    }
  if (align_bytes & 4)
    {
      dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
      src = adjust_automodify_address_nv (src, SImode, srcreg, off);
      if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
	set_mem_align (dst, 4 * BITS_PER_UNIT);
      if (src_align_bytes >= 0)
	{
	  unsigned int src_align = 0;
	  if ((src_align_bytes & 3) == (align_bytes & 3))
	    src_align = 4;
	  else if ((src_align_bytes & 1) == (align_bytes & 1))
	    src_align = 2;
	  if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
	    set_mem_align (src, src_align * BITS_PER_UNIT);
	}
      off = 4;
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
    }
  dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
  src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
  if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
    set_mem_align (dst, desired_align * BITS_PER_UNIT);
  if (src_align_bytes >= 0)
    {
      unsigned int src_align = 0;
      if ((src_align_bytes & 7) == (align_bytes & 7))
	src_align = 8;
      else if ((src_align_bytes & 3) == (align_bytes & 3))
	src_align = 4;
      else if ((src_align_bytes & 1) == (align_bytes & 1))
	src_align = 2;
      if (src_align > (unsigned int) desired_align)
	src_align = desired_align;
      if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
	set_mem_align (src, src_align * BITS_PER_UNIT);
    }
  if (MEM_SIZE_KNOWN_P (orig_dst))
    set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
  if (MEM_SIZE_KNOWN_P (orig_src))
    set_mem_size (src, MEM_SIZE (orig_src) - align_bytes);
  *srcp = src;
  return dst;
}
/* Store enough into DEST to align DEST, known to be aligned to ALIGN
   bytes, to DESIRED_ALIGNMENT.  */
static void
expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
			int align, int desired_alignment)
{
  if (align <= 1 && desired_alignment > 1)
    {
      rtx label = ix86_expand_aligntest (destptr, 1, false);
      destmem = change_address (destmem, QImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
      ix86_adjust_counter (count, 1);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 2 && desired_alignment > 2)
    {
      rtx label = ix86_expand_aligntest (destptr, 2, false);
      destmem = change_address (destmem, HImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
      ix86_adjust_counter (count, 2);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 4 && desired_alignment > 4)
    {
      rtx label = ix86_expand_aligntest (destptr, 4, false);
      destmem = change_address (destmem, SImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
      ix86_adjust_counter (count, 4);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  gcc_assert (desired_alignment <= 8);
}
/* Store enough into DST to align DST to DESIRED_ALIGN.  ALIGN_BYTES is
   how many bytes need to be stored.  */
static rtx
expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
				 int desired_align, int align_bytes)
{
  int off = 0;
  rtx orig_dst = dst;
  if (align_bytes & 1)
    {
      dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
      off = 1;
      emit_insn (gen_strset (destreg, dst,
			     gen_lowpart (QImode, value)));
    }
  if (align_bytes & 2)
    {
      dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
      if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
	set_mem_align (dst, 2 * BITS_PER_UNIT);
      off = 2;
      emit_insn (gen_strset (destreg, dst,
			     gen_lowpart (HImode, value)));
    }
  if (align_bytes & 4)
    {
      dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
      if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
	set_mem_align (dst, 4 * BITS_PER_UNIT);
      off = 4;
      emit_insn (gen_strset (destreg, dst,
			     gen_lowpart (SImode, value)));
    }
  dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
  if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
    set_mem_align (dst, desired_align * BITS_PER_UNIT);
  if (MEM_SIZE_KNOWN_P (orig_dst))
    set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
  return dst;
}
/* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation.  */
static enum stringop_alg
decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
	    int *dynamic_check, bool *noalign)
{
  const struct stringop_algs * algs;
  bool optimize_for_speed;
  /* Algorithms using the rep prefix want at least edi and ecx;
     additionally, memset wants eax and memcpy wants esi.  Don't
     consider such algorithms if the user has appropriated those
     registers for their own purposes.  */
  bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
			     || (memset
				 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
  *noalign = false;

#define ALG_USABLE_P(alg) (rep_prefix_usable			\
			   || (alg != rep_prefix_1_byte		\
			       && alg != rep_prefix_4_byte	\
			       && alg != rep_prefix_8_byte))
  const struct processor_costs *cost;

  /* Even if the string operation call is cold, we still might spend a lot
     of time processing large blocks.  */
  if (optimize_function_for_size_p (cfun)
      || (optimize_insn_for_size_p ()
	  && expected_size != -1 && expected_size < 256))
    optimize_for_speed = false;
  else
    optimize_for_speed = true;

  cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;

  *dynamic_check = -1;
  if (memset)
    algs = &cost->memset[TARGET_64BIT != 0];
  else
    algs = &cost->memcpy[TARGET_64BIT != 0];
  if (ix86_stringop_alg != no_stringop && ALG_USABLE_P (ix86_stringop_alg))
    return ix86_stringop_alg;
  /* rep; movq or rep; movl is the smallest variant.  */
  else if (!optimize_for_speed)
    {
      if (!count || (count & 3))
	return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
      else
	return rep_prefix_usable ? rep_prefix_4_byte : loop;
    }
  /* Very tiny blocks are best handled via the loop; REP is expensive to
     set up.  */
  else if (expected_size != -1 && expected_size < 4)
    return loop_1_byte;
  else if (expected_size != -1)
    {
      unsigned int i;
      enum stringop_alg alg = libcall;
      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
	{
	  /* We get here if the algorithms that were not libcall-based
	     were rep-prefix based and we are unable to use rep prefixes
	     based on global register usage.  Break out of the loop and
	     use the heuristic below.  */
	  if (algs->size[i].max == 0)
	    break;
	  if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
	    {
	      enum stringop_alg candidate = algs->size[i].alg;

	      if (candidate != libcall && ALG_USABLE_P (candidate))
		alg = candidate;
	      /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
		 last non-libcall inline algorithm.  */
	      if (TARGET_INLINE_ALL_STRINGOPS)
		{
		  /* When the current size is best to be copied by a libcall,
		     but we are still forced to inline, run the heuristic below
		     that will pick code for medium sized blocks.  */
		  if (alg != libcall)
		    return alg;
		  break;
		}
	      else if (ALG_USABLE_P (candidate))
		{
		  *noalign = algs->size[i].noalign;
		  return candidate;
		}
	    }
	}
      gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
    }
  /* When asked to inline the call anyway, try to pick a meaningful choice.
     We look for maximal size of block that is faster to copy by hand and
     take blocks of at most of that size guessing that average size will
     be roughly half of the block.

     If this turns out to be bad, we might simply specify the preferred
     choice in ix86_costs.  */
  if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
      && (algs->unknown_size == libcall
	  || !ALG_USABLE_P (algs->unknown_size)))
    {
      int max = -1;
      enum stringop_alg alg;
      int i;
      bool any_alg_usable_p = true;

      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
	{
	  enum stringop_alg candidate = algs->size[i].alg;
	  any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);

	  if (candidate != libcall && candidate
	      && ALG_USABLE_P (candidate))
	    max = algs->size[i].max;
	}
      /* If there aren't any usable algorithms, then recursing on
	 smaller sizes isn't going to find anything.  Just return the
	 simple byte-at-a-time copy loop.  */
      if (!any_alg_usable_p)
	{
	  /* Pick something reasonable.  */
	  if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
	    *dynamic_check = 128;
	  return loop_1_byte;
	}
      if (max == -1)
	max = 4096;
      alg = decide_alg (count, max / 2, memset, dynamic_check, noalign);
      gcc_assert (*dynamic_check == -1);
      gcc_assert (alg != libcall);
      if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
	*dynamic_check = max;
      return alg;
    }
  return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
#undef ALG_USABLE_P
}
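/* For example, given a (hypothetical) cost-table entry such as
     {{{256, loop, false}, {-1, rep_prefix_4_byte, false}}}
   an expected size of 100 picks the loop while 1000 picks
   rep_prefix_4_byte, provided the rep prefix is usable; with unknown
   size the choice falls to unknown_size or, under
   -minline-all-stringops, to the recursive heuristic above.
   (Illustrative table values only.)  */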
/* Decide on alignment.  We know that the operand is already aligned to ALIGN
   (ALIGN can be based on profile feedback and thus it is not 100% guaranteed).  */
static int
decide_alignment (int align,
		  enum stringop_alg alg,
		  int expected_size)
{
  int desired_align = 0;
  switch (alg)
    {
      case no_stringop:
	gcc_unreachable ();
      case loop:
      case unrolled_loop:
	desired_align = GET_MODE_SIZE (Pmode);
	break;
      case rep_prefix_8_byte:
	desired_align = 8;
	break;
      case rep_prefix_4_byte:
	/* PentiumPro has special logic triggering for 8 byte aligned blocks,
	   copying whole cacheline at once.  */
	if (TARGET_PENTIUMPRO)
	  desired_align = 8;
	else
	  desired_align = 4;
	break;
      case rep_prefix_1_byte:
	/* PentiumPro has special logic triggering for 8 byte aligned blocks,
	   copying whole cacheline at once.  */
	if (TARGET_PENTIUMPRO)
	  desired_align = 8;
	else
	  desired_align = 1;
	break;
      case loop_1_byte:
	desired_align = 1;
	break;
      case libcall:
	return 0;
    }

  if (desired_align < align)
    desired_align = align;
  if (expected_size != -1 && expected_size < 4)
    desired_align = align;
  return desired_align;
}
/* Return the smallest power of 2 greater than VAL.  */
static int
smallest_pow2_greater_than (int val)
{
  int ret = 1;
  while (ret <= val)
    ret <<= 1;
  return ret;
}
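/* E.g. smallest_pow2_greater_than (5) == 8, and since the result is
   strictly greater, smallest_pow2_greater_than (8) == 16.  */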
/* Expand string move (memcpy) operation.  Use i386 string operations
   when profitable.  expand_setmem contains similar code.  The code
   depends upon architecture, block size and alignment, but always has
   the same overall structure:

   1) Prologue guard: Conditional that jumps up to epilogues for small
      blocks that can be handled by epilogue alone.  This is faster
      but also needed for correctness, since the prologue assumes the block
      is larger than the desired alignment.

      Optional dynamic check for size and libcall for large
      blocks is emitted here too, with -minline-stringops-dynamically.

   2) Prologue: copy first few bytes in order to get destination
      aligned to DESIRED_ALIGN.  It is emitted only when ALIGN is less
      than DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be
      copied.  We emit either a jump tree on power of two sized
      blocks, or a byte loop.

   3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
      with specified algorithm.

   4) Epilogue: code copying tail of the block that is too small to be
      handled by main body (or up to size guarded by prologue guard).  */
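/* As a sketch, a memcpy expanded with 16-byte chunks looks roughly like

	if (count < 16) goto epilogue;		; step 1
	copy 0..15 bytes until dst is aligned	; step 2
	main loop over 16-byte chunks		; step 3
   epilogue:
	copy count & 15 tail bytes		; step 4

   (illustrative pseudo-code only; the real expansion below also
   maintains aliasing info and branch probabilities).  */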
bool
ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
		    rtx expected_align_exp, rtx expected_size_exp)
{
  rtx destreg;
  rtx srcreg;
  rtx label = NULL;
  rtx tmp;
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0, align_bytes = 0;
  enum stringop_alg alg;
  int dynamic_check;
  bool need_zero_guard = false;
  bool noalign;

  if (CONST_INT_P (align_exp))
    align = INTVAL (align_exp);
  /* i386 can do misaligned access on reasonably increased cost.  */
  if (CONST_INT_P (expected_align_exp)
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  /* ALIGN is the minimum of destination and source alignment, but we care here
     just about destination alignment.  */
  else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
    align = MEM_ALIGN (dst) / BITS_PER_UNIT;

  if (CONST_INT_P (count_exp))
    count = expected_size = INTVAL (count_exp);
  if (CONST_INT_P (expected_size_exp) && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Make sure we don't need to care about overflow later on.  */
  if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
    return false;

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, false, &dynamic_check, &noalign);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS || noalign)
    align = desired_align;

  if (alg == libcall)
    return false;
  gcc_assert (alg != no_stringop);
  if (!count)
    count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
  destreg = copy_addr_to_reg (XEXP (dst, 0));
  srcreg = copy_addr_to_reg (XEXP (src, 0));
  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode);
      break;
    case unrolled_loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode) * (TARGET_64BIT ? 4 : 2);
      break;
    case rep_prefix_8_byte:
      size_needed = 8;
      break;
    case rep_prefix_4_byte:
      size_needed = 4;
      break;
    case rep_prefix_1_byte:
      size_needed = 1;
      break;
    case loop_1_byte:
      need_zero_guard = true;
      size_needed = 1;
      break;
    }

  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (CONST_INT_P (count_exp) && desired_align > align)
    {
      if (INTVAL (count_exp) > desired_align
	  && INTVAL (count_exp) > size_needed)
	{
	  align_bytes
	    = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
	  if (align_bytes <= 0)
	    align_bytes = 0;
	  else
	    align_bytes = desired_align - align_bytes;
	}
      if (align_bytes == 0)
	count_exp = force_reg (counter_mode (count_exp), count_exp);
    }
  gcc_assert (desired_align >= 1 && align >= 1);

  /* Ensure that alignment prologue won't copy past end of block.  */
  if (size_needed > 1 || (desired_align > 1 && desired_align > align))
    {
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
      /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
	 Make sure it is power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);

      if (count)
	{
	  if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
	    {
	      /* If main algorithm works on QImode, no epilogue is needed.
		 For small sizes just don't align anything.  */
	      if (size_needed == 1)
		desired_align = align;
	      else
		goto epilogue;
	    }
	}
      else
	{
	  label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (epilogue_size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1 || expected_size < epilogue_size_needed)
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	}
    }

  /* Emit code to decide on runtime whether library call or inline should be
     used.  */
  if (dynamic_check != -1)
    {
      if (CONST_INT_P (count_exp))
	{
	  if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
	    {
	      emit_block_move_via_libcall (dst, src, count_exp, false);
	      count_exp = const0_rtx;
	      goto epilogue;
	    }
	}
      else
	{
	  rtx hot_label = gen_label_rtx ();
	  jump_around_label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
				   LEU, 0, GET_MODE (count_exp), 1, hot_label);
	  predict_jump (REG_BR_PROB_BASE * 90 / 100);
	  emit_block_move_via_libcall (dst, src, count_exp, false);
	  emit_jump (jump_around_label);
	  emit_label (hot_label);
	}
    }

  /* Step 2: Alignment prologue.  */

  if (desired_align > align)
    {
      if (align_bytes == 0)
	{
	  /* Except for the first move in epilogue, we no longer know
	     constant offset in aliasing info.  It doesn't seem worth
	     the pain to maintain it for the first move, so throw away
	     the info early.  */
	  src = change_address (src, BLKmode, srcreg);
	  dst = change_address (dst, BLKmode, destreg);
	  expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
				  desired_align);
	}
      else
	{
	  /* If we know how many bytes need to be stored before dst is
	     sufficiently aligned, maintain aliasing info accurately.  */
	  dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
						 desired_align, align_bytes);
	  count_exp = plus_constant (counter_mode (count_exp),
				     count_exp, -align_bytes);
	  count -= align_bytes;
	}
      if (need_zero_guard
	  && (count < (unsigned HOST_WIDE_INT) size_needed
	      || (align_bytes == 0
		  && count < ((unsigned HOST_WIDE_INT) size_needed
			      + desired_align - align))))
	{
	  /* It is possible that we copied enough so the main loop will not
	     execute.  */
	  gcc_assert (size_needed > 1);
	  if (label == NULL_RTX)
	    label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1
	      || expected_size < (desired_align - align) / 2 + size_needed)
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	}
    }
  if (label && size_needed == 1)
    {
      emit_label (label);
      LABEL_NUSES (label) = 1;
      label = NULL;
      epilogue_size_needed = 1;
    }
  else if (label == NULL_RTX)
    epilogue_size_needed = size_needed;

  /* Step 3: Main loop.  */

  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop_1_byte:
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, QImode, 1, expected_size);
      break;
    case loop:
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, word_mode, 1, expected_size);
      break;
    case unrolled_loop:
      /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
	 registers for 4 temporaries anyway.  */
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, word_mode,
				     TARGET_64BIT ? 4 : 2,
				     expected_size);
      break;
    case rep_prefix_8_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
				 DImode);
      break;
    case rep_prefix_4_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
				 SImode);
      break;
    case rep_prefix_1_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
				 QImode);
      break;
    }
  /* Properly adjust the offset of src and dest memory for aliasing.  */
  if (CONST_INT_P (count_exp))
    {
      src = adjust_automodify_address_nv (src, BLKmode, srcreg,
					  (count / size_needed) * size_needed);
      dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
					  (count / size_needed) * size_needed);
    }
  else
    {
      src = change_address (src, BLKmode, srcreg);
      dst = change_address (dst, BLKmode, destreg);
    }

  /* Step 4: Epilogue to copy the remaining bytes.  */
 epilogue:
  if (label)
    {
      /* When the main loop is done, COUNT_EXP might hold original count,
	 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
	 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
	 bytes.  Compensate if needed.  */

      if (size_needed < epilogue_size_needed)
	{
	  tmp =
	    expand_simple_binop (counter_mode (count_exp), AND, count_exp,
				 GEN_INT (size_needed - 1), count_exp, 1,
				 OPTAB_DIRECT);
	  if (tmp != count_exp)
	    emit_move_insn (count_exp, tmp);
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }

  if (count_exp != const0_rtx && epilogue_size_needed > 1)
    expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
			    epilogue_size_needed);
  if (jump_around_label)
    emit_label (jump_around_label);
  return true;
}
/* Helper function for memcpy.  For QImode value 0xXY produce
   0xXYXYXYXY of the width specified by MODE.  This is essentially
   a * 0x01010101, but we can do slightly better than
   synth_mult by unwinding the sequence by hand on CPUs with
   slow multiply.  */
static rtx
promote_duplicated_reg (enum machine_mode mode, rtx val)
{
  enum machine_mode valmode = GET_MODE (val);
  rtx tmp;
  int nops = mode == DImode ? 3 : 2;

  gcc_assert (mode == SImode || mode == DImode);
  if (val == const0_rtx)
    return copy_to_mode_reg (mode, const0_rtx);
  if (CONST_INT_P (val))
    {
      HOST_WIDE_INT v = INTVAL (val) & 255;

      v |= v << 8;
      v |= v << 16;
      if (mode == DImode)
	v |= (v << 16) << 16;
      return copy_to_mode_reg (mode, gen_int_mode (v, mode));
    }

  if (valmode == VOIDmode)
    valmode = QImode;
  if (valmode != QImode)
    val = gen_lowpart (QImode, val);
  if (mode == QImode)
    return val;
  if (!TARGET_PARTIAL_REG_STALL)
    nops--;
  if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
      + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
      <= (ix86_cost->shift_const + ix86_cost->add) * nops
	 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
    {
      rtx reg = convert_modes (mode, QImode, val, true);
      tmp = promote_duplicated_reg (mode, const1_rtx);
      return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
				  OPTAB_DIRECT);
    }
  else
    {
      rtx reg = convert_modes (mode, QImode, val, true);

      if (!TARGET_PARTIAL_REG_STALL)
	{
	  if (mode == SImode)
	    emit_insn (gen_movsi_insv_1 (reg, reg));
	  else
	    emit_insn (gen_movdi_insv_1 (reg, reg));
	}
      else
	{
	  tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
				     NULL, 1, OPTAB_DIRECT);
	  reg =
	    expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
	}
      tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
				 NULL, 1, OPTAB_DIRECT);
      reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      if (mode == SImode)
	return reg;
      tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
				 NULL, 1, OPTAB_DIRECT);
      reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      return reg;
    }
}
/* Duplicate value VAL using promote_duplicated_reg into maximal size that will
   be needed by main loop copying SIZE_NEEDED chunks and prologue getting
   alignment from ALIGN to DESIRED_ALIGN.  */
static rtx
promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align,
				int align)
{
  rtx promoted_val;

  if (TARGET_64BIT
      && (size_needed > 4 || (desired_align > align && desired_align > 4)))
    promoted_val = promote_duplicated_reg (DImode, val);
  else if (size_needed > 2 || (desired_align > align && desired_align > 2))
    promoted_val = promote_duplicated_reg (SImode, val);
  else if (size_needed > 1 || (desired_align > align && desired_align > 1))
    promoted_val = promote_duplicated_reg (HImode, val);
  else
    promoted_val = val;

  return promoted_val;
}
/* Expand string clear operation (bzero).  Use i386 string operations when
   profitable.  See expand_movmem comment for explanation of individual
   steps performed.  */
bool
ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
		    rtx expected_align_exp, rtx expected_size_exp)
{
  rtx destreg;
  rtx label = NULL;
  rtx tmp;
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0, align_bytes = 0;
  enum stringop_alg alg;
  rtx promoted_val = NULL;
  bool force_loopy_epilogue = false;
  int dynamic_check;
  bool need_zero_guard = false;
  bool noalign;

  if (CONST_INT_P (align_exp))
    align = INTVAL (align_exp);
  /* i386 can do misaligned access on reasonably increased cost.  */
  if (CONST_INT_P (expected_align_exp)
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  if (CONST_INT_P (count_exp))
    count = expected_size = INTVAL (count_exp);
  if (CONST_INT_P (expected_size_exp) && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Make sure we don't need to care about overflow later on.  */
  if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
    return false;

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, true, &dynamic_check, &noalign);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS || noalign)
    align = desired_align;

  if (alg == libcall)
    return false;
  gcc_assert (alg != no_stringop);
  if (!count)
    count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
  destreg = copy_addr_to_reg (XEXP (dst, 0));
  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode);
      break;
    case unrolled_loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode) * 4;
      break;
    case rep_prefix_8_byte:
      size_needed = 8;
      break;
    case rep_prefix_4_byte:
      size_needed = 4;
      break;
    case rep_prefix_1_byte:
      size_needed = 1;
      break;
    case loop_1_byte:
      need_zero_guard = true;
      size_needed = 1;
      break;
    }
  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (CONST_INT_P (count_exp) && desired_align > align)
    {
      if (INTVAL (count_exp) > desired_align
	  && INTVAL (count_exp) > size_needed)
	{
	  align_bytes
	    = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
	  if (align_bytes <= 0)
	    align_bytes = 0;
	  else
	    align_bytes = desired_align - align_bytes;
	}
      if (align_bytes == 0)
	{
	  enum machine_mode mode = SImode;
	  if (TARGET_64BIT && (count & ~0xffffffff))
	    mode = DImode;
	  count_exp = force_reg (mode, count_exp);
	}
    }
  /* Do the cheap promotion to allow better CSE across the
     main loop and epilogue (i.e. one load of the big constant in the
     front of all code).  */
  if (CONST_INT_P (val_exp))
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
						   desired_align, align);
  /* Ensure that alignment prologue won't copy past end of block.  */
  if (size_needed > 1 || (desired_align > 1 && desired_align > align))
    {
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
      /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
	 Make sure it is power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);

      /* To improve performance of small blocks, we jump around the VAL
	 promoting mode.  This means that if the promoted VAL is not constant,
	 we might not use it in the epilogue and have to use byte
	 loop variant.  */
      if (epilogue_size_needed > 2 && !promoted_val)
	force_loopy_epilogue = true;
      if (count)
	{
	  if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
	    {
	      /* If main algorithm works on QImode, no epilogue is needed.
		 For small sizes just don't align anything.  */
	      if (size_needed == 1)
		desired_align = align;
	      else
		goto epilogue;
	    }
	}
      else
	{
	  label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (epilogue_size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1 || expected_size <= epilogue_size_needed)
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	}
    }
  if (dynamic_check != -1)
    {
      rtx hot_label = gen_label_rtx ();
      jump_around_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
			       LEU, 0, counter_mode (count_exp), 1, hot_label);
      predict_jump (REG_BR_PROB_BASE * 90 / 100);
      set_storage_via_libcall (dst, count_exp, val_exp, false);
      emit_jump (jump_around_label);
      emit_label (hot_label);
    }

  /* Step 2: Alignment prologue.  */

  /* Do the expensive promotion once we branched off the small blocks.  */
  if (!promoted_val)
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
						   desired_align, align);
  gcc_assert (desired_align >= 1 && align >= 1);

  if (desired_align > align)
    {
      if (align_bytes == 0)
	{
	  /* Except for the first move in epilogue, we no longer know
	     constant offset in aliasing info.  It doesn't seem worth
	     the pain to maintain it for the first move, so throw away
	     the info early.  */
	  dst = change_address (dst, BLKmode, destreg);
	  expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
				  desired_align);
	}
      else
	{
	  /* If we know how many bytes need to be stored before dst is
	     sufficiently aligned, maintain aliasing info accurately.  */
	  dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
						 desired_align, align_bytes);
	  count_exp = plus_constant (counter_mode (count_exp),
				     count_exp, -align_bytes);
	  count -= align_bytes;
	}
      if (need_zero_guard
	  && (count < (unsigned HOST_WIDE_INT) size_needed
	      || (align_bytes == 0
		  && count < ((unsigned HOST_WIDE_INT) size_needed
			      + desired_align - align))))
	{
	  /* It is possible that we copied enough so the main loop will not
	     execute.  */
	  gcc_assert (size_needed > 1);
	  if (label == NULL_RTX)
	    label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1
	      || expected_size < (desired_align - align) / 2 + size_needed)
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	}
    }
  if (label && size_needed == 1)
    {
      emit_label (label);
      LABEL_NUSES (label) = 1;
      label = NULL;
      promoted_val = val_exp;
      epilogue_size_needed = 1;
    }
  else if (label == NULL_RTX)
    epilogue_size_needed = size_needed;

  /* Step 3: Main loop.  */

  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop_1_byte:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, QImode, 1, expected_size);
      break;
    case loop:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, word_mode, 1, expected_size);
      break;
    case unrolled_loop:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, word_mode, 4, expected_size);
      break;
    case rep_prefix_8_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
				  DImode, val_exp);
      break;
    case rep_prefix_4_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
				  SImode, val_exp);
      break;
    case rep_prefix_1_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
				  QImode, val_exp);
      break;
    }
  /* Properly adjust the offset of the dest memory for aliasing.  */
  if (CONST_INT_P (count_exp))
    dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
					(count / size_needed) * size_needed);
  else
    dst = change_address (dst, BLKmode, destreg);

  /* Step 4: Epilogue to copy the remaining bytes.  */

 epilogue:
  if (label)
    {
      /* When the main loop is done, COUNT_EXP might hold original count,
	 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
	 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
	 bytes.  Compensate if needed.  */

      if (size_needed < epilogue_size_needed)
	{
	  tmp =
	    expand_simple_binop (counter_mode (count_exp), AND, count_exp,
				 GEN_INT (size_needed - 1), count_exp, 1,
				 OPTAB_DIRECT);
	  if (tmp != count_exp)
	    emit_move_insn (count_exp, tmp);
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (count_exp != const0_rtx && epilogue_size_needed > 1)
    {
      if (force_loopy_epilogue)
	expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
					 epilogue_size_needed);
      else
	expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
				epilogue_size_needed);
    }
  if (jump_around_label)
    emit_label (jump_around_label);
  return true;
}
23330 /* Expand the appropriate insns for doing strlen if not just doing
23333 out = result, initialized with the start address
23334 align_rtx = alignment of the address.
23335 scratch = scratch register, initialized with the startaddress when
23336 not aligned, otherwise undefined
23338 This is just the body. It needs the initializations mentioned above and
23339 some address computing at the end. These things are done in i386.md. */
23342 ix86_expand_strlensi_unroll_1 (rtx out
, rtx src
, rtx align_rtx
)
23346 rtx align_2_label
= NULL_RTX
;
23347 rtx align_3_label
= NULL_RTX
;
23348 rtx align_4_label
= gen_label_rtx ();
23349 rtx end_0_label
= gen_label_rtx ();
23351 rtx tmpreg
= gen_reg_rtx (SImode
);
23352 rtx scratch
= gen_reg_rtx (SImode
);
23356 if (CONST_INT_P (align_rtx
))
23357 align
= INTVAL (align_rtx
);
  /* Loop to check 1..3 bytes for null to get an aligned pointer.  */

  /* Is there a known alignment and is it less than 4?  */
  if (align < 4)
    {
      rtx scratch1 = gen_reg_rtx (Pmode);
      emit_move_insn (scratch1, out);
      /* Is there a known alignment and is it not 2?  */
      if (align != 2)
	{
	  align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
	  align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */

	  /* Leave just the 3 lower bits.  */
	  align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
				    NULL_RTX, 0, OPTAB_WIDEN);

	  emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
				   Pmode, 1, align_4_label);
	  emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
				   Pmode, 1, align_2_label);
	  emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
				   Pmode, 1, align_3_label);
	}
      else
	{
	  /* Since the alignment is 2, we have to check 2 or 0 bytes;
	     check whether it is aligned to a 4-byte boundary.  */

	  align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
				    NULL_RTX, 0, OPTAB_WIDEN);

	  emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
				   Pmode, 1, align_4_label);
	}

      mem = change_address (src, QImode, out);

      /* Now compare the bytes.  */

      /* Compare the first n unaligned bytes on a byte-per-byte basis.  */
      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
			       QImode, 1, end_0_label);

      /* Increment the address.  */
      emit_insn (ix86_gen_add3 (out, out, const1_rtx));

      /* Not needed with an alignment of 2.  */
      if (align != 2)
	{
	  emit_label (align_2_label);

	  emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
				   end_0_label);

	  emit_insn (ix86_gen_add3 (out, out, const1_rtx));

	  emit_label (align_3_label);
	}

      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
			       end_0_label);

      emit_insn (ix86_gen_add3 (out, out, const1_rtx));
    }
  /* Generate the loop to check 4 bytes at a time.  It is not a good idea
     to align this loop; it yields only huge programs and does not help to
     speed up.  */
  emit_label (align_4_label);

  mem = change_address (src, SImode, out);
  emit_move_insn (scratch, mem);
  emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));

  /* This formula yields a nonzero result iff one of the bytes is zero.
     This saves three branches inside the loop and many cycles.  */

  emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
  emit_insn (gen_one_cmplsi2 (scratch, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg,
			 gen_int_mode (0x80808080, SImode)));
  emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
			   align_4_label);
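  /* Illustrative note (not in the original source): in C terms the four
     insns above compute ((v - 0x01010101) & ~v & 0x80808080), which is
     nonzero iff some byte of V is zero.  Subtracting 0x01010101 borrows
     into a byte only when that byte was zero; masking with ~v keeps
     exactly those borrow bits.  E.g. v = 0x41004242 yields 0x00800000,
     flagging the zero byte, while v = 0x41424344 yields 0.  */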
  if (TARGET_CMOVE)
    {
      rtx reg = gen_reg_rtx (SImode);
      rtx reg2 = gen_reg_rtx (Pmode);
      emit_move_insn (reg, tmpreg);
      emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));

      /* If zero is not in the first two bytes, move two bytes forward.  */
      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
			      gen_rtx_IF_THEN_ELSE (SImode, tmp,
						    reg,
						    tmpreg)));
      /* Emit lea manually to avoid clobbering of flags.  */
      emit_insn (gen_rtx_SET (SImode, reg2,
			      gen_rtx_PLUS (Pmode, out, const2_rtx)));

      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, out,
			      gen_rtx_IF_THEN_ELSE (Pmode, tmp,
						    reg2,
						    out)));
    }
  else
    {
      rtx end_2_label = gen_label_rtx ();
      /* Is zero in the first two bytes?  */

      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
				  gen_rtx_LABEL_REF (VOIDmode, end_2_label),
				  pc_rtx);
      tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      JUMP_LABEL (tmp) = end_2_label;

      /* Not in the first two.  Move two bytes forward.  */
      emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
      emit_insn (ix86_gen_add3 (out, out, const2_rtx));

      emit_label (end_2_label);
    }
  /* Avoid branch in fixing the byte.  */
  tmpreg = gen_lowpart (QImode, tmpreg);
  emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
  tmp = gen_rtx_REG (CCmode, FLAGS_REG);
  cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
  emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
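  /* Explanatory note (not in the original source): at this point the low
     byte of TMPREG has bit 7 set iff the zero byte was the first of the
     two remaining candidate bytes.  Doubling TMPREG shifts that bit into
     the carry flag, so OUT - 3 - CF lands exactly on the zero byte:
     -4 when the first byte was zero, -3 when it was the second.  */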
  emit_label (end_0_label);
}
/* Expand strlen.  */

bool
ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
{
  rtx addr, scratch1, scratch2, scratch3, scratch4;

  /* The generic case of the strlen expander is long.  Avoid expanding it
     unless TARGET_INLINE_ALL_STRINGOPS.  */

  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !TARGET_INLINE_ALL_STRINGOPS
      && !optimize_insn_for_size_p ()
      && (!CONST_INT_P (align) || INTVAL (align) < 4))
    return false;

  addr = force_reg (Pmode, XEXP (src, 0));
  scratch1 = gen_reg_rtx (Pmode);

  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !optimize_insn_for_size_p ())
    {
      /* Well it seems that some optimizer does not combine a call like
	 foo (strlen (bar), strlen (bar));
	 when the move and the subtraction are done here.  It does calculate
	 the length just once when these instructions are done inside of
	 output_strlen_unroll().  But I think since &bar[strlen (bar)] is
	 often used and I use one fewer register for the lifetime of
	 output_strlen_unroll() this is better.  */

      emit_move_insn (out, addr);

      ix86_expand_strlensi_unroll_1 (out, src, align);

      /* strlensi_unroll_1 returns the address of the zero at the end of
	 the string, like memchr(), so compute the length by subtracting
	 the start address.  */
      emit_insn (ix86_gen_sub3 (out, out, addr));
    }
  else
    {
      rtx unspec;

      /* Can't use this if the user has appropriated eax, ecx, or edi.  */
      if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
	return false;

      scratch2 = gen_reg_rtx (Pmode);
      scratch3 = gen_reg_rtx (Pmode);
      scratch4 = force_reg (Pmode, constm1_rtx);

      emit_move_insn (scratch3, addr);
      eoschar = force_reg (QImode, eoschar);

      src = replace_equiv_address_nv (src, scratch3);

      /* If .md starts supporting :P, this can be done in .md.  */
      unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
						 scratch4), UNSPEC_SCAS);
      emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
      emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
      emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
    }
  return true;
}
/* For the given symbol (function), construct code to compute the address
   of its PLT entry in the large x86-64 PIC model.  */

static rtx
construct_plt_address (rtx symbol)
{
  rtx tmp, unspec;

  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
  gcc_assert (ix86_cmodel == CM_LARGE_PIC);
  gcc_assert (Pmode == DImode);

  tmp = gen_reg_rtx (Pmode);
  unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);

  emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
  emit_insn (ix86_gen_add3 (tmp, tmp, pic_offset_table_rtx));
  return tmp;
}
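/* Illustrative note (not in the original source): in assembly terms the
   sequence built above corresponds roughly to
     movabs $symbol@PLTOFF, %tmp
     add    %reg, %tmp
   where %reg is whatever register pic_offset_table_rtx names (assumed
   here to be the PIC base), i.e. PLT entry = PIC base + symbol@PLTOFF.  */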
void
ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
		  rtx callarg2,
		  rtx pop, bool sibcall)
{
  /* We need to represent that SI and DI registers are clobbered
     by SYSV calls.  */
  static int clobbered_registers[] = {
	XMM6_REG, XMM7_REG, XMM8_REG,
	XMM9_REG, XMM10_REG, XMM11_REG,
	XMM12_REG, XMM13_REG, XMM14_REG,
	XMM15_REG, SI_REG, DI_REG
  };
  rtx vec[ARRAY_SIZE (clobbered_registers) + 3];
  rtx use = NULL, call;
  unsigned int vec_len;

  if (pop == const0_rtx)
    pop = NULL;
  gcc_assert (!TARGET_64BIT || !pop);

  if (TARGET_MACHO && !TARGET_64BIT)
    {
#if TARGET_MACHO
      if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
	fnaddr = machopic_indirect_call_target (fnaddr);
#endif
    }
  else
    {
      /* Static functions and indirect calls don't need the pic register.  */
      if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
	  && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
	  && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
	use_reg (&use, pic_offset_table_rtx);
    }

  if (TARGET_64BIT && INTVAL (callarg2) >= 0)
    {
      rtx al = gen_rtx_REG (QImode, AX_REG);
      emit_move_insn (al, callarg2);
      use_reg (&use, al);
    }

  if (ix86_cmodel == CM_LARGE_PIC
      && MEM_P (fnaddr)
      && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
      && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
    fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
  else if (sibcall
	   ? !sibcall_insn_operand (XEXP (fnaddr, 0), word_mode)
	   : !call_insn_operand (XEXP (fnaddr, 0), word_mode))
    {
      fnaddr = convert_to_mode (word_mode, XEXP (fnaddr, 0), 1);
      fnaddr = gen_rtx_MEM (QImode, copy_to_mode_reg (word_mode, fnaddr));
    }

  vec_len = 0;
  call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
  if (retval)
    call = gen_rtx_SET (VOIDmode, retval, call);
  vec[vec_len++] = call;

  if (pop)
    {
      pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
      pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
      vec[vec_len++] = pop;
    }

  if (TARGET_64BIT_MS_ABI
      && (!callarg2 || INTVAL (callarg2) != -2))
    {
      unsigned i;

      vec[vec_len++] = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
				       UNSPEC_MS_TO_SYSV_CALL);

      for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
	vec[vec_len++]
	  = gen_rtx_CLOBBER (VOIDmode,
			     gen_rtx_REG (SSE_REGNO_P (clobbered_registers[i])
					  ? TImode : DImode,
					  clobbered_registers[i]));
    }

  if (vec_len > 1)
    call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (vec_len, vec));
  call = emit_call_insn (call);
  if (use)
    CALL_INSN_FUNCTION_USAGE (call) = use;
}
/* Output the assembly for a call instruction.  */

const char *
ix86_output_call_insn (rtx insn, rtx call_op)
{
  bool direct_p = constant_call_address_operand (call_op, VOIDmode);
  bool seh_nop_p = false;
  const char *xasm;

  if (SIBLING_CALL_P (insn))
    {
      if (direct_p)
	xasm = "jmp\t%P0";
      /* SEH epilogue detection requires the indirect branch case
	 to include REX.W.  */
      else if (TARGET_SEH)
	xasm = "rex.W jmp %A0";
      else
	xasm = "jmp\t%A0";

      output_asm_insn (xasm, &call_op);
      return "";
    }

  /* SEH unwinding can require an extra nop to be emitted in several
     circumstances.  Determine if we have one of those.  */
  if (TARGET_SEH)
    {
      rtx i;

      for (i = NEXT_INSN (insn); i; i = NEXT_INSN (i))
	{
	  /* If we get to another real insn, we don't need the nop.  */
	  if (INSN_P (i))
	    break;

	  /* If we get to the epilogue note, prevent a catch region from
	     being adjacent to the standard epilogue sequence.  If non-
	     call-exceptions, we'll have done this during epilogue
	     emission.  */
	  if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
	      && !flag_non_call_exceptions
	      && !can_throw_internal (insn))
	    {
	      seh_nop_p = true;
	      break;
	    }
	}

      /* If we didn't find a real insn following the call, prevent the
	 unwinder from looking into the next function.  */
      if (i == NULL)
	seh_nop_p = true;
    }

  if (direct_p)
    xasm = "call\t%P0";
  else
    xasm = "call\t%A0";

  output_asm_insn (xasm, &call_op);

  if (seh_nop_p)
    return "nop";

  return "";
}
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */

static struct machine_function *
ix86_init_machine_status (void)
{
  struct machine_function *f;

  f = ggc_alloc_cleared_machine_function ();
  f->use_fast_prologue_epilogue_nregs = -1;
  f->call_abi = ix86_abi;

  return f;
}
/* Return a MEM corresponding to a stack slot with mode MODE.
   Allocate a new slot if necessary.

   The RTL for a function can have several slots available: N is
   which slot to use.  */

rtx
assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
{
  struct stack_local_entry *s;

  gcc_assert (n < MAX_386_STACK_LOCALS);

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->mode == mode && s->n == n)
      return validize_mem (copy_rtx (s->rtl));

  s = ggc_alloc_stack_local_entry ();
  s->n = n;
  s->mode = mode;
  s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);

  s->next = ix86_stack_locals;
  ix86_stack_locals = s;
  return validize_mem (s->rtl);
}
static void
ix86_instantiate_decls (void)
{
  struct stack_local_entry *s;

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->rtl != NULL_RTX)
      instantiate_decl_rtl (s->rtl);
}
/* Calculate the length of the memory address in the instruction encoding.
   Includes the addr32 prefix, but does not include the one-byte modrm,
   opcode, or other prefixes.  We never generate an addr32 prefix for LEA
   insns.  */

int
memory_address_length (rtx addr, bool lea)
{
  struct ix86_address parts;
  rtx base, index, disp;
  int len;
  int ok;

  if (GET_CODE (addr) == PRE_DEC
      || GET_CODE (addr) == POST_INC
      || GET_CODE (addr) == PRE_MODIFY
      || GET_CODE (addr) == POST_MODIFY)
    return 0;

  ok = ix86_decompose_address (addr, &parts);
  gcc_assert (ok);

  len = (parts.seg == SEG_DEFAULT) ? 0 : 1;

  /* If this is not an LEA instruction, add the length of the addr32
     prefix.  */
  if (TARGET_64BIT && !lea
      && (SImode_address_operand (addr, VOIDmode)
	  || (parts.base && GET_MODE (parts.base) == SImode)
	  || (parts.index && GET_MODE (parts.index) == SImode)))
    len++;

  base = parts.base;
  index = parts.index;
  disp = parts.disp;

  if (base && GET_CODE (base) == SUBREG)
    base = SUBREG_REG (base);
  if (index && GET_CODE (index) == SUBREG)
    index = SUBREG_REG (index);

  gcc_assert (base == NULL_RTX || REG_P (base));
  gcc_assert (index == NULL_RTX || REG_P (index));

  /* Rule of thumb:
       - esp as the base always wants an index,
       - ebp as the base always wants a displacement,
       - r12 as the base always wants an index,
       - r13 as the base always wants a displacement.  */

  /* Register Indirect.  */
  if (base && !index && !disp)
    {
      /* esp (for its index) and ebp (for its displacement) need
	 the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
	 code.  */
      if (base == arg_pointer_rtx
	  || base == frame_pointer_rtx
	  || REGNO (base) == SP_REG
	  || REGNO (base) == BP_REG
	  || REGNO (base) == R12_REG
	  || REGNO (base) == R13_REG)
	len++;
    }

  /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
     is not disp32, but disp32(%rip), so for disp32 a
     SIB byte is needed, unless print_operand_address
     optimizes it into disp32(%rip) or (%rip) is implied
     by UNSPEC.  */
  else if (disp && !base && !index)
    {
      len += 4;
      if (TARGET_64BIT)
	{
	  rtx symbol = disp;

	  if (GET_CODE (disp) == CONST)
	    symbol = XEXP (disp, 0);
	  if (GET_CODE (symbol) == PLUS
	      && CONST_INT_P (XEXP (symbol, 1)))
	    symbol = XEXP (symbol, 0);

	  if (GET_CODE (symbol) != LABEL_REF
	      && (GET_CODE (symbol) != SYMBOL_REF
		  || SYMBOL_REF_TLS_MODEL (symbol) != 0)
	      && (GET_CODE (symbol) != UNSPEC
		  || (XINT (symbol, 1) != UNSPEC_GOTPCREL
		      && XINT (symbol, 1) != UNSPEC_PCREL
		      && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
	    len++;
	}
    }
  else
    {
      /* Find the length of the displacement constant.  */
      if (disp)
	{
	  if (base && satisfies_constraint_K (disp))
	    len += 1;
	  else
	    len += 4;
	}
      /* ebp always wants a displacement.  Similarly r13.  */
      else if (base && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
	len++;

      /* An index requires the two-byte modrm form....  */
      if (index
	  /* ...like esp (or r12), which always wants an index.  */
	  || base == arg_pointer_rtx
	  || base == frame_pointer_rtx
	  || (base && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
	len++;
    }

  return len;
}
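/* A minimal standalone sketch (not GCC code; the helper name is
   hypothetical) of the mod r/m corner cases encoded above, in terms of
   hardware register numbers.  Kept under "#if 0" as documentation only.  */
#if 0
static int
base_needs_extra_byte (int base_hw_regno)
{
  /* rm == 4 (esp/r12) selects a SIB byte; rm == 5 (ebp/r13) under mod 00
     selects disp32, so a displacement byte is forced even for offset 0.  */
  int rm = base_hw_regno & 7;
  return rm == 4 || rm == 5;
}
#endif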
/* Compute the default value for the "length_immediate" attribute.  When
   SHORTFORM is set, expect that the insn has an 8-bit immediate
   alternative.  */

int
ix86_attr_length_immediate_default (rtx insn, bool shortform)
{
  int len = 0;
  int i;

  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (CONSTANT_P (recog_data.operand[i]))
      {
	enum attr_mode mode = get_attr_mode (insn);

	gcc_assert (!len);
	if (shortform && CONST_INT_P (recog_data.operand[i]))
	  {
	    HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
	    switch (mode)
	      {
	      case MODE_QI:
		len = 1;
		continue;
	      case MODE_HI:
		ival = trunc_int_for_mode (ival, HImode);
		break;
	      case MODE_SI:
		ival = trunc_int_for_mode (ival, SImode);
		break;
	      default:
		break;
	      }
	    if (IN_RANGE (ival, -128, 127))
	      {
		len = 1;
		continue;
	      }
	  }
	switch (mode)
	  {
	  case MODE_QI:
	    len = 1;
	    break;
	  case MODE_HI:
	    len = 2;
	    break;
	  case MODE_SI:
	    len = 4;
	    break;
	  /* Immediates for DImode instructions are encoded
	     as 32bit sign extended values.  */
	  case MODE_DI:
	    len = 4;
	    break;
	  default:
	    fatal_insn ("unknown insn mode", insn);
	  }
      }
  return len;
}
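/* Illustrative example (not in the original source): "addl $3, %eax" can
   use the sign-extended imm8 form, so with SHORTFORM its immediate
   contributes 1 byte, while "addl $1000, %eax" needs the full imm32 form
   and contributes 4.  */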
/* Compute the default value for the "length_address" attribute.  */

int
ix86_attr_length_address_default (rtx insn)
{
  int i;

  if (get_attr_type (insn) == TYPE_LEA)
    {
      rtx set = PATTERN (insn), addr;

      if (GET_CODE (set) == PARALLEL)
	set = XVECEXP (set, 0, 0);

      gcc_assert (GET_CODE (set) == SET);

      addr = SET_SRC (set);

      return memory_address_length (addr, true);
    }

  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (MEM_P (recog_data.operand[i]))
      {
	constrain_operands_cached (reload_completed);
	if (which_alternative != -1)
	  {
	    const char *constraints = recog_data.constraints[i];
	    int alt = which_alternative;

	    while (*constraints == '=' || *constraints == '+')
	      constraints++;
	    while (alt-- > 0)
	      while (*constraints++ != ',')
		;
	    /* Skip ignored operands.  */
	    if (*constraints == 'X')
	      continue;
	  }
	return memory_address_length (XEXP (recog_data.operand[i], 0), false);
      }
  return 0;
}
/* Compute the default value for the "length_vex" attribute.  It includes
   the 2- or 3-byte VEX prefix and 1 opcode byte.  */

int
ix86_attr_length_vex_default (rtx insn, bool has_0f_opcode, bool has_vex_w)
{
  int i;

  /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit
     needs the 3-byte VEX prefix.  */
  if (!has_0f_opcode || has_vex_w)
    return 3 + 1;

  /* We can always use the 2-byte VEX prefix in 32-bit mode.  */
  if (!TARGET_64BIT)
    return 2 + 1;

  extract_insn_cached (insn);

  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (REG_P (recog_data.operand[i]))
      {
	/* The REX.W bit needs the 3-byte VEX prefix.  */
	if (GET_MODE (recog_data.operand[i]) == DImode
	    && GENERAL_REG_P (recog_data.operand[i]))
	  return 3 + 1;
      }
    else
      {
	/* The REX.X or REX.B bits need the 3-byte VEX prefix.  */
	if (MEM_P (recog_data.operand[i])
	    && x86_extended_reg_mentioned_p (recog_data.operand[i]))
	  return 3 + 1;
      }

  return 2 + 1;
}
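/* Illustrative example (not in the original source): "vaddps %xmm1,
   %xmm2, %xmm3" fits the 2-byte (C5) VEX prefix, whereas a memory operand
   that mentions an extended register (forcing REX.X/REX.B) or a DImode
   general register (forcing REX.W) requires the 3-byte (C4) form.  */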
/* Return the maximum number of instructions a cpu can issue.  */

static int
ix86_issue_rate (void)
{
  switch (ix86_tune)
    {
    case PROCESSOR_PENTIUM:
    case PROCESSOR_ATOM:
    case PROCESSOR_K6:
    case PROCESSOR_BTVER2:
      return 2;

    case PROCESSOR_PENTIUMPRO:
    case PROCESSOR_PENTIUM4:
    case PROCESSOR_CORE2:
    case PROCESSOR_COREI7:
    case PROCESSOR_HASWELL:
    case PROCESSOR_ATHLON:
    case PROCESSOR_K8:
    case PROCESSOR_AMDFAM10:
    case PROCESSOR_NOCONA:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
    case PROCESSOR_BDVER1:
    case PROCESSOR_BDVER2:
    case PROCESSOR_BDVER3:
    case PROCESSOR_BTVER1:
      return 3;

    default:
      return 1;
    }
}
/* A subroutine of ix86_adjust_cost -- return TRUE iff INSN reads flags set
   by DEP_INSN and nothing set by DEP_INSN.  */

static bool
ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
{
  rtx set, set2;

  /* Simplify the test for uninteresting insns.  */
  if (insn_type != TYPE_SETCC
      && insn_type != TYPE_ICMOV
      && insn_type != TYPE_FCMOV
      && insn_type != TYPE_IBR)
    return false;

  if ((set = single_set (dep_insn)) != 0)
    {
      set = SET_DEST (set);
      set2 = NULL_RTX;
    }
  else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
	   && XVECLEN (PATTERN (dep_insn), 0) == 2
	   && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
	   && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
    {
      set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
    }
  else
    return false;

  if (!REG_P (set) || REGNO (set) != FLAGS_REG)
    return false;

  /* This test is true if the dependent insn reads the flags but
     not any other potentially set register.  */
  if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
    return false;

  if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
    return false;

  return true;
}
/* Return true iff USE_INSN has a memory address with operands set by
   SET_INSN.  */

static bool
ix86_agi_dependent (rtx set_insn, rtx use_insn)
{
  int i;

  extract_insn_cached (use_insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (MEM_P (recog_data.operand[i]))
      {
	rtx addr = XEXP (recog_data.operand[i], 0);
	return modified_in_p (addr, set_insn) != 0;
      }
  return false;
}
static int
ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_insn_type;
  enum attr_memory memory;
  rtx set, set2;
  int dep_insn_code_number;

  /* Anti and output dependencies have zero cost on all CPUs.  */
  if (REG_NOTE_KIND (link) != 0)
    return 0;

  dep_insn_code_number = recog_memoized (dep_insn);

  /* If we can't recognize the insns, we can't really do anything.  */
  if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

  switch (ix86_tune)
    {
    case PROCESSOR_PENTIUM:
      /* Address Generation Interlock adds a cycle of latency.  */
      if (insn_type == TYPE_LEA)
	{
	  rtx addr = PATTERN (insn);

	  if (GET_CODE (addr) == PARALLEL)
	    addr = XVECEXP (addr, 0, 0);

	  gcc_assert (GET_CODE (addr) == SET);

	  addr = SET_SRC (addr);
	  if (modified_in_p (addr, dep_insn))
	    cost += 1;
	}
      else if (ix86_agi_dependent (dep_insn, insn))
	cost += 1;

      /* ??? Compares pair with jump/setcc.  */
      if (ix86_flags_dependent (insn, dep_insn, insn_type))
	cost = 0;

      /* Floating point stores require the value to be ready one cycle
	 earlier.  */
      if (insn_type == TYPE_FMOV
	  && get_attr_memory (insn) == MEMORY_STORE
	  && !ix86_agi_dependent (dep_insn, insn))
	cost += 1;
      break;

    case PROCESSOR_PENTIUMPRO:
      memory = get_attr_memory (insn);

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
	cost += 5;

      /* There is one cycle extra latency between an FP op and a store.  */
      if (insn_type == TYPE_FMOV
	  && (set = single_set (dep_insn)) != NULL_RTX
	  && (set2 = single_set (insn)) != NULL_RTX
	  && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
	  && MEM_P (SET_DEST (set2)))
	cost += 1;

      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (dep_insn, insn))
	{
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
	  if (dep_insn_type == TYPE_IMOV
	      || dep_insn_type == TYPE_FMOV)
	    cost = 1;
	  else if (cost > 1)
	    cost--;
	}
      break;

    case PROCESSOR_K6:
      memory = get_attr_memory (insn);

      /* The esp dependency is resolved before the instruction is really
	 finished.  */
      if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
	  && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
	return 1;

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
	cost += 5;

      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (dep_insn, insn))
	{
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
	  if (dep_insn_type == TYPE_IMOV
	      || dep_insn_type == TYPE_FMOV)
	    cost = 1;
	  else if (cost > 2)
	    cost -= 2;
	  else
	    cost = 1;
	}
      break;

    case PROCESSOR_ATHLON:
    case PROCESSOR_K8:
    case PROCESSOR_AMDFAM10:
    case PROCESSOR_BDVER1:
    case PROCESSOR_BDVER2:
    case PROCESSOR_BDVER3:
    case PROCESSOR_BTVER1:
    case PROCESSOR_BTVER2:
    case PROCESSOR_ATOM:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
      memory = get_attr_memory (insn);

      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (dep_insn, insn))
	{
	  enum attr_unit unit = get_attr_unit (insn);
	  int loadcost;

	  /* Because of the difference between the length of the integer
	     and floating unit pipeline preparation stages, the memory
	     operands for floating point are cheaper.

	     ??? For Athlon the difference is most probably 2.  */
	  if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
	    loadcost = 3;
	  else
	    loadcost = TARGET_ATHLON ? 2 : 0;

	  if (cost >= loadcost)
	    cost -= loadcost;
	  else
	    cost = 0;
	}
      break;

    default:
      break;
    }

  return cost;
}
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */

static int
ia32_multipass_dfa_lookahead (void)
{
  switch (ix86_tune)
    {
    case PROCESSOR_PENTIUM:
      return 2;

    case PROCESSOR_PENTIUMPRO:
    case PROCESSOR_K6:
      return 1;

    case PROCESSOR_CORE2:
    case PROCESSOR_COREI7:
    case PROCESSOR_HASWELL:
    case PROCESSOR_ATOM:
      /* Generally, we want haifa-sched:max_issue() to look ahead as far
	 as many instructions can be executed on a cycle, i.e.,
	 issue_rate.  I wonder why tuning for many CPUs does not do this.  */
      if (reload_completed)
	return ix86_issue_rate ();
      /* Don't use lookahead for the pre-reload schedule to save compile
	 time.  */
      return 0;

    default:
      return 0;
    }
}
/* Try to reorder the ready list to take advantage of Atom pipelined IMUL
   execution.  It is applied if
   (1) an IMUL instruction is on the top of the list;
   (2) there exists exactly one producer of an independent IMUL
       instruction in the ready list;
   (3) that producer is then put on the top of the ready list.
   Returns the issue rate.  */

static int
ix86_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
		    int clock_var ATTRIBUTE_UNUSED)
{
  static int issue_rate = -1;
  int n_ready = *pn_ready;
  int index = -1;
  rtx insn, insn1, insn2;
  int i;
  sd_iterator_def sd_it;
  dep_t dep;

  if (issue_rate < 0)
    /* Set up the issue rate.  */
    issue_rate = ix86_issue_rate ();

  /* Do reordering for Atom only.  */
  if (ix86_tune != PROCESSOR_ATOM)
    return issue_rate;
  /* Do not perform ready list reordering for the pre-reload schedule
     pass.  */
  if (!reload_completed)
    return issue_rate;
  /* Nothing to do if the ready list contains only 1 instruction.  */
  if (n_ready <= 1)
    return issue_rate;

  /* Check that an IMUL instruction is on the top of the ready list.  */
  insn = ready[n_ready - 1];
  if (!NONDEBUG_INSN_P (insn))
    return issue_rate;
  insn = PATTERN (insn);
  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);
  if (GET_CODE (insn) != SET)
    return issue_rate;
  if (!(GET_CODE (SET_SRC (insn)) == MULT
	&& GET_MODE (SET_SRC (insn)) == SImode))
    return issue_rate;

  /* Search for the producer of an independent IMUL instruction.  */
  for (i = n_ready - 2; i >= 0; i--)
    {
      insn = ready[i];
      if (!NONDEBUG_INSN_P (insn))
	continue;
      /* Skip IMUL instructions.  */
      insn2 = PATTERN (insn);
      if (GET_CODE (insn2) == PARALLEL)
	insn2 = XVECEXP (insn2, 0, 0);
      if (GET_CODE (insn2) == SET
	  && GET_CODE (SET_SRC (insn2)) == MULT
	  && GET_MODE (SET_SRC (insn2)) == SImode)
	continue;

      FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
	{
	  rtx con;
	  con = DEP_CON (dep);
	  if (!NONDEBUG_INSN_P (con))
	    continue;
	  insn1 = PATTERN (con);
	  if (GET_CODE (insn1) == PARALLEL)
	    insn1 = XVECEXP (insn1, 0, 0);

	  if (GET_CODE (insn1) == SET
	      && GET_CODE (SET_SRC (insn1)) == MULT
	      && GET_MODE (SET_SRC (insn1)) == SImode)
	    {
	      sd_iterator_def sd_it1;
	      dep_t dep1;
	      /* Check that there is no other dependee of the IMUL.  */
	      index = i;
	      FOR_EACH_DEP (con, SD_LIST_BACK, sd_it1, dep1)
		{
		  rtx pro;
		  pro = DEP_PRO (dep1);
		  if (!NONDEBUG_INSN_P (pro))
		    continue;
		  if (pro != insn)
		    index = -1;
		}
	      if (index >= 0)
		break;
	    }
	}
      if (index >= 0)
	break;
    }
  if (index < 0)
    return issue_rate; /* Didn't find the IMUL producer.  */

  if (sched_verbose > 1)
    fprintf (dump, ";;\tatom sched_reorder: swap %d and %d insns\n",
	     INSN_UID (ready[index]), INSN_UID (ready[n_ready - 1]));

  /* Put the IMUL producer (ready[index]) at the top of the ready list.  */
  insn1 = ready[index];
  for (i = index; i < n_ready - 1; i++)
    ready[i] = ready[i + 1];
  ready[n_ready - 1] = insn1;

  return issue_rate;
}
static bool ix86_class_likely_spilled_p (reg_class_t);

/* Return true if the lhs of INSN is a HW function argument register; set
   IS_SPILLED to true if it is a likely-spilled HW register.  */
static bool
insn_is_function_arg (rtx insn, bool *is_spilled)
{
  rtx dst;

  if (!NONDEBUG_INSN_P (insn))
    return false;
  /* Call instructions are not movable; ignore them.  */
  if (CALL_P (insn))
    return false;
  insn = PATTERN (insn);
  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);
  if (GET_CODE (insn) != SET)
    return false;
  dst = SET_DEST (insn);
  if (REG_P (dst) && HARD_REGISTER_P (dst)
      && ix86_function_arg_regno_p (REGNO (dst)))
    {
      /* Is it a likely-spilled HW register?  */
      if (!TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dst))
	  && ix86_class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dst))))
	*is_spilled = true;
      return true;
    }
  return false;
}
/* Add output dependencies for a chain of adjacent function arguments if
   there is a move to a likely-spilled HW register.  Return the first
   argument if at least one dependence was added, or NULL otherwise.  */
static rtx
add_parameter_dependencies (rtx call, rtx head)
{
  rtx insn;
  rtx last = call;
  rtx first_arg = NULL;
  bool is_spilled = false;

  head = PREV_INSN (head);

  /* Find the argument-passing instruction nearest to the call.  */
  while (true)
    {
      last = PREV_INSN (last);
      if (last == head)
	return NULL;
      if (!NONDEBUG_INSN_P (last))
	continue;
      if (insn_is_function_arg (last, &is_spilled))
	break;
      return NULL;
    }

  first_arg = last;
  while (true)
    {
      insn = PREV_INSN (last);
      if (!INSN_P (insn))
	break;
      if (insn == head)
	break;
      if (!NONDEBUG_INSN_P (insn))
	{
	  last = insn;
	  continue;
	}
      if (insn_is_function_arg (insn, &is_spilled))
	{
	  /* Add an output dependence between two function arguments if
	     the chain of output arguments contains likely-spilled HW
	     registers.  */
	  if (is_spilled)
	    add_dependence (last, insn, REG_DEP_OUTPUT);
	  first_arg = last = insn;
	}
      else
	break;
    }
  if (!is_spilled)
    return NULL;
  return first_arg;
}
/* Add an output or anti dependency from INSN to FIRST_ARG to restrict
   its code motion.  */
static void
avoid_func_arg_motion (rtx first_arg, rtx insn)
{
  rtx set;
  rtx tmp;

  set = single_set (insn);
  if (!set)
    return;
  tmp = SET_DEST (set);
  if (REG_P (tmp))
    {
      /* Add an output dependency to the first function argument.  */
      add_dependence (first_arg, insn, REG_DEP_OUTPUT);
      return;
    }
  /* Add an anti dependency.  */
  add_dependence (first_arg, insn, REG_DEP_ANTI);
}
/* Avoid cross-block motion of a function argument by adding a dependency
   from the first non-jump instruction in BB.  */
static void
add_dependee_for_func_arg (rtx arg, basic_block bb)
{
  rtx insn = BB_END (bb);

  while (insn)
    {
      if (NONDEBUG_INSN_P (insn) && NONJUMP_INSN_P (insn))
	{
	  rtx set = single_set (insn);
	  if (set)
	    {
	      avoid_func_arg_motion (arg, insn);
	      return;
	    }
	}
      if (insn == BB_HEAD (bb))
	return;
      insn = PREV_INSN (insn);
    }
}
/* Hook for the pre-reload schedule - avoid motion of function arguments
   passed in likely-spilled HW registers.  */
static void
ix86_dependencies_evaluation_hook (rtx head, rtx tail)
{
  rtx insn;
  rtx first_arg = NULL;
  if (reload_completed)
    return;
  while (head != tail && DEBUG_INSN_P (head))
    head = NEXT_INSN (head);
  for (insn = tail; insn != head; insn = PREV_INSN (insn))
    if (INSN_P (insn) && CALL_P (insn))
      {
	first_arg = add_parameter_dependencies (insn, head);
	if (first_arg)
	  {
	    /* Add a dependee for the first argument to predecessors if
	       the region contains more than one block.  */
	    basic_block bb = BLOCK_FOR_INSN (insn);
	    int rgn = CONTAINING_RGN (bb->index);
	    int nr_blks = RGN_NR_BLOCKS (rgn);
	    /* Skip trivial regions and region head blocks that can have
	       predecessors outside of the region.  */
	    if (nr_blks > 1 && BLOCK_TO_BB (bb->index) != 0)
	      {
		edge e;
		edge_iterator ei;

		/* Assume that the region is an SCC, i.e. all immediate
		   predecessors of a non-head block are in the same
		   region.  */
		FOR_EACH_EDGE (e, ei, bb->preds)
		  {
		    /* Avoid the creation of loop-carried dependencies by
		       using the topological ordering in the region.  */
		    if (BLOCK_TO_BB (bb->index) > BLOCK_TO_BB (e->src->index))
		      add_dependee_for_func_arg (first_arg, e->src);
		  }
	      }
	    insn = first_arg;
	    if (insn == head)
	      break;
	  }
      }
    else if (first_arg)
      avoid_func_arg_motion (first_arg, insn);
}
/* Hook for the pre-reload schedule - set the priority of moves from
   likely-spilled HW registers to maximum, to schedule them as soon as
   possible.  These are moves from function argument registers at the top
   of the function entry and moves from function return value registers
   after a call.  */
static int
ix86_adjust_priority (rtx insn, int priority)
{
  rtx set;

  if (reload_completed)
    return priority;

  if (!NONDEBUG_INSN_P (insn))
    return priority;

  set = single_set (insn);
  if (set)
    {
      rtx tmp = SET_SRC (set);
      if (REG_P (tmp)
	  && HARD_REGISTER_P (tmp)
	  && !TEST_HARD_REG_BIT (fixed_reg_set, REGNO (tmp))
	  && ix86_class_likely_spilled_p (REGNO_REG_CLASS (REGNO (tmp))))
	return current_sched_info->sched_max_insns_priority;
    }

  return priority;
}
/* Model the decoder of Core 2/i7.
   The hooks below for multipass scheduling (see haifa-sched.c:max_issue)
   track the instruction fetch block boundaries and make sure that long
   (9+ byte) instructions are assigned to D0.  */

/* Maximum length of an insn that can be handled by
   a secondary decoder unit.  '8' for Core 2/i7.  */
static int core2i7_secondary_decoder_max_insn_size;

/* Ifetch block size, i.e., number of bytes the decoder reads per cycle.
   '16' for Core 2/i7.  */
static int core2i7_ifetch_block_size;

/* Maximum number of instructions the decoder can handle per cycle.
   '6' for Core 2/i7.  */
static int core2i7_ifetch_block_max_insns;

typedef struct ix86_first_cycle_multipass_data_ *
  ix86_first_cycle_multipass_data_t;
typedef const struct ix86_first_cycle_multipass_data_ *
  const_ix86_first_cycle_multipass_data_t;

/* A variable to store target state across calls to max_issue within
   one cycle.  */
static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
  *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
/* Initialize DATA.  */
static void
core2i7_first_cycle_multipass_init (void *_data)
{
  ix86_first_cycle_multipass_data_t data
    = (ix86_first_cycle_multipass_data_t) _data;

  data->ifetch_block_len = 0;
  data->ifetch_block_n_insns = 0;
  data->ready_try_change = NULL;
  data->ready_try_change_size = 0;
}
/* Advancing the cycle; reset ifetch block counts.  */
static void
core2i7_dfa_post_advance_cycle (void)
{
  ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;

  gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);

  data->ifetch_block_len = 0;
  data->ifetch_block_n_insns = 0;
}

static int min_insn_size (rtx);
/* Filter out insns from ready_try that the core will not be able to issue
   on the current cycle due to the decoder.  */
static void
core2i7_first_cycle_multipass_filter_ready_try
(const_ix86_first_cycle_multipass_data_t data,
 char *ready_try, int n_ready, bool first_cycle_insn_p)
{
  while (n_ready--)
    {
      rtx insn;
      int insn_size;

      if (ready_try[n_ready])
	continue;

      insn = get_ready_element (n_ready);
      insn_size = min_insn_size (insn);

      if (/* If this is too long an insn for a secondary decoder ...  */
	  (!first_cycle_insn_p
	   && insn_size > core2i7_secondary_decoder_max_insn_size)
	  /* ... or it would not fit into the ifetch block ...  */
	  || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
	  /* ... or the decoder is full already ...  */
	  || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
	/* ... mask the insn out.  */
	{
	  ready_try[n_ready] = 1;

	  if (data->ready_try_change)
	    bitmap_set_bit (data->ready_try_change, n_ready);
	}
    }
}
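/* A standalone sketch (not GCC code; the helper name is hypothetical,
   with the Core 2/i7 parameters set in ix86_sched_init_global hard-wired)
   of the admission test applied above.  Kept under "#if 0" as
   documentation only.  */
#if 0
static int
decoder_can_take (int first_cycle_insn_p, int insn_size,
		  int block_len, int block_n_insns)
{
  if (!first_cycle_insn_p && insn_size > 8)	/* only D0 takes 9+ bytes */
    return 0;
  if (block_len + insn_size > 16)		/* past the ifetch block */
    return 0;
  if (block_n_insns + 1 > 6)			/* all decoders used up */
    return 0;
  return 1;
}
#endif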
/* Prepare for a new round of multipass lookahead scheduling.  */
static void
core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
				     bool first_cycle_insn_p)
{
  ix86_first_cycle_multipass_data_t data
    = (ix86_first_cycle_multipass_data_t) _data;
  const_ix86_first_cycle_multipass_data_t prev_data
    = ix86_first_cycle_multipass_data;

  /* Restore the state from the end of the previous round.  */
  data->ifetch_block_len = prev_data->ifetch_block_len;
  data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;

  /* Filter instructions that cannot be issued on the current cycle due to
     decoder restrictions.  */
  core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
						  first_cycle_insn_p);
}
/* INSN is being issued in the current solution.  Account for its impact on
   the decoder model.  */
static void
core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
				     rtx insn, const void *_prev_data)
{
  ix86_first_cycle_multipass_data_t data
    = (ix86_first_cycle_multipass_data_t) _data;
  const_ix86_first_cycle_multipass_data_t prev_data
    = (const_ix86_first_cycle_multipass_data_t) _prev_data;

  int insn_size = min_insn_size (insn);

  data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
  data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
  gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
	      && data->ifetch_block_n_insns
		 <= core2i7_ifetch_block_max_insns);

  /* Allocate or resize the bitmap for storing INSN's effect on ready_try.  */
  if (!data->ready_try_change)
    {
      data->ready_try_change = sbitmap_alloc (n_ready);
      data->ready_try_change_size = n_ready;
    }
  else if (data->ready_try_change_size < n_ready)
    {
      data->ready_try_change = sbitmap_resize (data->ready_try_change,
					       n_ready, 0);
      data->ready_try_change_size = n_ready;
    }
  bitmap_clear (data->ready_try_change);

  /* Filter out insns from ready_try that the core will not be able to issue
     on the current cycle due to the decoder.  */
  core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
						  false);
}
/* Revert the effect on ready_try.  */
static void
core2i7_first_cycle_multipass_backtrack (const void *_data,
					 char *ready_try,
					 int n_ready ATTRIBUTE_UNUSED)
{
  const_ix86_first_cycle_multipass_data_t data
    = (const_ix86_first_cycle_multipass_data_t) _data;
  unsigned int i = 0;
  sbitmap_iterator sbi;

  gcc_assert (bitmap_last_set_bit (data->ready_try_change) < n_ready);
  EXECUTE_IF_SET_IN_BITMAP (data->ready_try_change, 0, i, sbi)
    {
      ready_try[i] = 0;
    }
}
/* Save the result of multipass lookahead scheduling for the next round.  */
static void
core2i7_first_cycle_multipass_end (const void *_data)
{
  const_ix86_first_cycle_multipass_data_t data
    = (const_ix86_first_cycle_multipass_data_t) _data;
  ix86_first_cycle_multipass_data_t next_data
    = ix86_first_cycle_multipass_data;

  if (data != NULL)
    {
      next_data->ifetch_block_len = data->ifetch_block_len;
      next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
    }
}
/* Deallocate target data.  */
static void
core2i7_first_cycle_multipass_fini (void *_data)
{
  ix86_first_cycle_multipass_data_t data
    = (ix86_first_cycle_multipass_data_t) _data;

  if (data->ready_try_change)
    {
      sbitmap_free (data->ready_try_change);
      data->ready_try_change = NULL;
      data->ready_try_change_size = 0;
    }
}
/* Prepare for the scheduling pass.  */
static void
ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
			int verbose ATTRIBUTE_UNUSED,
			int max_uid ATTRIBUTE_UNUSED)
{
  /* Install scheduling hooks for the current CPU.  Some of these hooks are
     used in time-critical parts of the scheduler, so we only set them up
     when they are actually used.  */
  switch (ix86_tune)
    {
    case PROCESSOR_CORE2:
    case PROCESSOR_COREI7:
    case PROCESSOR_HASWELL:
      /* Do not perform multipass scheduling for the pre-reload schedule
	 to save compile time.  */
      if (reload_completed)
	{
	  targetm.sched.dfa_post_advance_cycle
	    = core2i7_dfa_post_advance_cycle;
	  targetm.sched.first_cycle_multipass_init
	    = core2i7_first_cycle_multipass_init;
	  targetm.sched.first_cycle_multipass_begin
	    = core2i7_first_cycle_multipass_begin;
	  targetm.sched.first_cycle_multipass_issue
	    = core2i7_first_cycle_multipass_issue;
	  targetm.sched.first_cycle_multipass_backtrack
	    = core2i7_first_cycle_multipass_backtrack;
	  targetm.sched.first_cycle_multipass_end
	    = core2i7_first_cycle_multipass_end;
	  targetm.sched.first_cycle_multipass_fini
	    = core2i7_first_cycle_multipass_fini;

	  /* Set decoder parameters.  */
	  core2i7_secondary_decoder_max_insn_size = 8;
	  core2i7_ifetch_block_size = 16;
	  core2i7_ifetch_block_max_insns = 6;
	  break;
	}
      /* ... Fall through ...  */
    default:
      targetm.sched.dfa_post_advance_cycle = NULL;
      targetm.sched.first_cycle_multipass_init = NULL;
      targetm.sched.first_cycle_multipass_begin = NULL;
      targetm.sched.first_cycle_multipass_issue = NULL;
      targetm.sched.first_cycle_multipass_backtrack = NULL;
      targetm.sched.first_cycle_multipass_end = NULL;
      targetm.sched.first_cycle_multipass_fini = NULL;
      break;
    }
}
/* Compute the alignment given to a constant that is being placed in memory.
   EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.
   The value of this function is used instead of that alignment to align
   the object.  */

int
ix86_constant_alignment (tree exp, int align)
{
  if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
      || TREE_CODE (exp) == INTEGER_CST)
    {
      if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
	return 64;
      else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
	return 128;
    }
  else if (!optimize_size && TREE_CODE (exp) == STRING_CST
	   && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
    return BITS_PER_WORD;

  return align;
}
/* Compute the alignment for a static variable.
   TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this function is used
   instead of that alignment to align the object.  */

int
ix86_data_alignment (tree type, int align)
{
  int max_align
    = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);

  if (AGGREGATE_TYPE_P (type)
      && TYPE_SIZE (type)
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
	  || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
      && align < max_align)
    align = max_align;
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
  if (TARGET_64BIT)
    {
      if (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
	      || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
	return 128;
    }
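  /* Illustrative example (not in the original source): on x86-64 a
     file-scope "static char buf[24]" has TYPE_SIZE 192 bits >= 128, so it
     is given 128-bit alignment above, making aligned SSE access to it
     safe.  */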
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      if (TYPE_MODE (type) == DCmode && align < 64)
	return 64;
      if ((TYPE_MODE (type) == XCmode
	   || TYPE_MODE (type) == TCmode) && align < 128)
	return 128;
    }
  else if ((TREE_CODE (type) == RECORD_TYPE
	    || TREE_CODE (type) == UNION_TYPE
	    || TREE_CODE (type) == QUAL_UNION_TYPE)
	   && TYPE_FIELDS (type))
    {
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
	   || TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_MODE (type) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
	return 128;
    }

  return align;
}
/* Compute the alignment for a local variable or a stack slot.  EXP is
   the data type or decl itself, MODE is the widest mode available and
   ALIGN is the alignment that the object would ordinarily have.  The
   value of this macro is used instead of that alignment to align the
   object.  */

unsigned int
ix86_local_alignment (tree exp, enum machine_mode mode,
		      unsigned int align)
{
  tree type, decl;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if (!TARGET_64BIT
      && align == 64
      && ix86_preferred_stack_boundary < 64
      && (mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || !TYPE_USER_ALIGN (type))
      && (!decl || !DECL_USER_ALIGN (decl)))
    align = 32;

  /* If TYPE is NULL, we are allocating a stack slot for a caller-save
     register in MODE.  We will return the largest alignment of XF
     and DF.  */
  if (!type)
    {
      if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
	align = GET_MODE_ALIGNMENT (DFmode);
      return align;
    }

  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  The exact wording is:

     An array uses the same alignment as its elements, except that a local
     or global array variable of length at least 16 bytes or
     a C99 variable-length array variable always has alignment of at least
     16 bytes.

     This was added to allow the use of aligned SSE instructions on arrays.
     This rule is meant for static storage (where the compiler cannot do
     the analysis by itself).  We follow it for automatic variables only
     when convenient.  We fully control everything in the function being
     compiled, and functions from other units cannot rely on the alignment.

     Exclude the va_list type.  It is the common case of a local array
     where we cannot benefit from the alignment.  */
  if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
      && TARGET_SSE)
    {
      if (AGGREGATE_TYPE_P (type)
	  && (va_list_type_node == NULL_TREE
	      || (TYPE_MAIN_VARIANT (type)
		  != TYPE_MAIN_VARIANT (va_list_type_node)))
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
	      || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
	return 128;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      if (TYPE_MODE (type) == DCmode && align < 64)
	return 64;
      if ((TYPE_MODE (type) == XCmode
	   || TYPE_MODE (type) == TCmode) && align < 128)
	return 128;
    }
  else if ((TREE_CODE (type) == RECORD_TYPE
	    || TREE_CODE (type) == UNION_TYPE
	    || TREE_CODE (type) == QUAL_UNION_TYPE)
	   && TYPE_FIELDS (type))
    {
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
	   || TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_MODE (type) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
	return 128;
    }

  return align;
}
/* Compute the minimum required alignment for dynamic stack realignment
   purposes for a local variable, parameter or a stack slot.  EXP is
   the data type or decl itself, MODE is its mode and ALIGN is the
   alignment that the object would ordinarily have.  */

unsigned int
ix86_minimum_alignment (tree exp, enum machine_mode mode,
			unsigned int align)
{
  tree type, decl;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
    return align;

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || !TYPE_USER_ALIGN (type))
      && (!decl || !DECL_USER_ALIGN (decl)))
    return 32;

  return align;
}
/* Find a location for the static chain incoming to a nested function.
   This is a register, unless all free registers are used by arguments.  */

static rtx
ix86_static_chain (const_tree fndecl, bool incoming_p)
{
  unsigned regno;

  if (!DECL_STATIC_CHAIN (fndecl))
    return NULL;

  if (TARGET_64BIT)
    {
      /* We always use R10 in 64-bit mode.  */
      regno = R10_REG;
    }
  else
    {
      tree fntype;
      unsigned int ccvt;

      /* By default in 32-bit mode we use ECX to pass the static chain.  */
      regno = CX_REG;

      fntype = TREE_TYPE (fndecl);
      ccvt = ix86_get_callcvt (fntype);
      if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	{
	  /* Fastcall functions use ecx/edx for arguments, which leaves
	     us with EAX for the static chain.
	     Thiscall functions use ecx for arguments, which also
	     leaves us with EAX for the static chain.  */
	  regno = AX_REG;
	}
      else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	{
	  /* Thiscall functions use ecx for arguments, which leaves
	     us with EAX and EDX for the static chain.
	     We use EAX for ABI compatibility.  */
	  regno = AX_REG;
	}
      else if (ix86_function_regparm (fntype, fndecl) == 3)
	{
	  /* For regparm 3, we have no free call-clobbered registers in
	     which to store the static chain.  In order to implement this,
	     we have the trampoline push the static chain to the stack.
	     However, we can't push a value below the return address when
	     we call the nested function directly, so we have to use an
	     alternate entry point.  For this we use ESI, and have the
	     alternate entry point push ESI, so that things appear the
	     same once we're executing the nested function.  */
	  if (incoming_p)
	    {
	      if (fndecl == current_function_decl)
		ix86_static_chain_on_stack = true;
	      return gen_frame_mem (SImode,
				    plus_constant (Pmode,
						   arg_pointer_rtx, -8));
	    }
	  regno = SI_REG;
	}
    }

  return gen_rtx_REG (Pmode, regno);
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNDECL is the decl of the target address; M_TRAMP is a MEM for
   the trampoline, and CHAIN_VALUE is an RTX for the static chain
   to be passed to the target function.  */

static void
ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, fnaddr;
  int opcode;
  int offset = 0;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  if (TARGET_64BIT)
    {
      int size;

      /* Load the function address into r11.  Try to load the address using
	 the shorter movl instead of movabs.  We may want to support
	 movq for kernel mode, but the kernel does not use trampolines at
	 the moment.  FNADDR is a 32-bit address and may not be in
	 DImode when ptr_mode == SImode.  Always use movl in this
	 case.  */
      if (ptr_mode == SImode
	  || x86_64_zext_immediate_operand (fnaddr, VOIDmode))
	{
	  fnaddr = copy_addr_to_reg (fnaddr);

	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb41, HImode));

	  mem = adjust_address (m_tramp, SImode, offset + 2);
	  emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
	  offset += 6;
	}
      else
	{
	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb49, HImode));

	  mem = adjust_address (m_tramp, DImode, offset + 2);
	  emit_move_insn (mem, fnaddr);
	  offset += 10;
	}

      /* Load the static chain using movabs to r10.  Use the shorter movl
	 instead of movabs when ptr_mode == SImode.  */
      if (ptr_mode == SImode)
	{
	  opcode = 0xba41;
	  size = 6;
	}
      else
	{
	  opcode = 0xba49;
	  size = 10;
	}

      mem = adjust_address (m_tramp, HImode, offset);
      emit_move_insn (mem, gen_int_mode (opcode, HImode));

      mem = adjust_address (m_tramp, ptr_mode, offset + 2);
      emit_move_insn (mem, chain_value);
      offset += size;

      /* Jump to r11; the last (unused) byte is a nop, only there to
	 pad the write out to a single 32-bit store.  */
      mem = adjust_address (m_tramp, SImode, offset);
      emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
      offset += 4;
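      /* Illustrative byte layout (not in the original source; shown for
	 the ptr_mode == DImode path, halfwords stored little-endian, so
	 0xbb49 becomes bytes 49 bb):
	   49 bb <imm64>   movabs $fnaddr, %r11
	   49 ba <imm64>   movabs $chain, %r10
	   49 ff e3        rex.W jmp *%r11
	   90              nop (write padding)  */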
    }
  else
    {
      rtx disp, chain;

      /* Depending on the static chain location, either load a register
	 with a constant, or push the constant to the stack.  All of the
	 instructions are the same size.  */
      chain = ix86_static_chain (fndecl, true);
      if (REG_P (chain))
	{
	  switch (REGNO (chain))
	    {
	    case AX_REG:
	      opcode = 0xb8; break;
	    case CX_REG:
	      opcode = 0xb9; break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else
	opcode = 0x68;

      mem = adjust_address (m_tramp, QImode, offset);
      emit_move_insn (mem, gen_int_mode (opcode, QImode));

      mem = adjust_address (m_tramp, SImode, offset + 1);
      emit_move_insn (mem, chain_value);
      offset += 5;

      mem = adjust_address (m_tramp, QImode, offset);
      emit_move_insn (mem, gen_int_mode (0xe9, QImode));

      mem = adjust_address (m_tramp, SImode, offset + 1);

      /* Compute the offset from the end of the jmp to the target function.
	 In the case in which the trampoline stores the static chain on
	 the stack, we need to skip the first insn which pushes the
	 (call-saved) register static chain; this push is 1 byte.  */
      offset += 5;
      disp = expand_binop (SImode, sub_optab, fnaddr,
			   plus_constant (Pmode, XEXP (m_tramp, 0),
					  offset - (MEM_P (chain) ? 1 : 0)),
			   NULL_RTX, 1, OPTAB_DIRECT);
      emit_move_insn (mem, disp);
    }

  gcc_assert (offset <= TRAMPOLINE_SIZE);
#ifdef HAVE_ENABLE_EXECUTE_STACK
#ifdef CHECK_EXECUTE_STACK_ENABLED
  if (CHECK_EXECUTE_STACK_ENABLED)
#endif
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}

/* The following file contains several enumerations and data structures
   built from the definitions in i386-builtin-types.def.  */

#include "i386-builtin-types.inc"
/* Table for the ix86 builtin non-function types.  */
static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];

/* Retrieve an element from the above table, building some of
   the types lazily.  */

static tree
ix86_get_builtin_type (enum ix86_builtin_type tcode)
{
  unsigned int index;
  tree type, itype;

  gcc_assert ((unsigned) tcode < ARRAY_SIZE (ix86_builtin_type_tab));

  type = ix86_builtin_type_tab[(int) tcode];
  if (type != NULL)
    return type;

  gcc_assert (tcode > IX86_BT_LAST_PRIM);
  if (tcode <= IX86_BT_LAST_VECT)
    {
      enum machine_mode mode;

      index = tcode - IX86_BT_LAST_PRIM - 1;
      itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
      mode = ix86_builtin_type_vect_mode[index];

      type = build_vector_type_for_mode (itype, mode);
    }
  else
    {
      int quals;

      index = tcode - IX86_BT_LAST_VECT - 1;
      if (tcode <= IX86_BT_LAST_PTR)
	quals = TYPE_UNQUALIFIED;
      else
	quals = TYPE_QUAL_CONST;

      itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
      if (quals != TYPE_UNQUALIFIED)
	itype = build_qualified_type (itype, quals);

      type = build_pointer_type (itype);
    }

  ix86_builtin_type_tab[(int) tcode] = type;
  return type;
}
25408 static GTY(()) tree ix86_builtin_func_type_tab
[(int) IX86_BT_LAST_ALIAS
+ 1];
25410 /* Retrieve an element from the above table, building some of
25411 the types lazily. */
25414 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode
)
25418 gcc_assert ((unsigned)tcode
< ARRAY_SIZE (ix86_builtin_func_type_tab
));
25420 type
= ix86_builtin_func_type_tab
[(int) tcode
];
25424 if (tcode
<= IX86_BT_LAST_FUNC
)
25426 unsigned start
= ix86_builtin_func_start
[(int) tcode
];
25427 unsigned after
= ix86_builtin_func_start
[(int) tcode
+ 1];
25428 tree rtype
, atype
, args
= void_list_node
;
25431 rtype
= ix86_get_builtin_type (ix86_builtin_func_args
[start
]);
25432 for (i
= after
- 1; i
> start
; --i
)
25434 atype
= ix86_get_builtin_type (ix86_builtin_func_args
[i
]);
25435 args
= tree_cons (NULL
, atype
, args
);
25438 type
= build_function_type (rtype
, args
);
25442 unsigned index
= tcode
- IX86_BT_LAST_FUNC
- 1;
25443 enum ix86_builtin_func_type icode
;
25445 icode
= ix86_builtin_func_alias_base
[index
];
25446 type
= ix86_get_builtin_func_type (icode
);
25449 ix86_builtin_func_type_tab
[(int) tcode
] = type
;
25454 /* Codes for all the SSE/MMX builtins. */
25457 IX86_BUILTIN_ADDPS
,
25458 IX86_BUILTIN_ADDSS
,
25459 IX86_BUILTIN_DIVPS
,
25460 IX86_BUILTIN_DIVSS
,
25461 IX86_BUILTIN_MULPS
,
25462 IX86_BUILTIN_MULSS
,
25463 IX86_BUILTIN_SUBPS
,
25464 IX86_BUILTIN_SUBSS
,
25466 IX86_BUILTIN_CMPEQPS
,
25467 IX86_BUILTIN_CMPLTPS
,
25468 IX86_BUILTIN_CMPLEPS
,
25469 IX86_BUILTIN_CMPGTPS
,
25470 IX86_BUILTIN_CMPGEPS
,
25471 IX86_BUILTIN_CMPNEQPS
,
25472 IX86_BUILTIN_CMPNLTPS
,
25473 IX86_BUILTIN_CMPNLEPS
,
25474 IX86_BUILTIN_CMPNGTPS
,
25475 IX86_BUILTIN_CMPNGEPS
,
25476 IX86_BUILTIN_CMPORDPS
,
25477 IX86_BUILTIN_CMPUNORDPS
,
25478 IX86_BUILTIN_CMPEQSS
,
25479 IX86_BUILTIN_CMPLTSS
,
25480 IX86_BUILTIN_CMPLESS
,
25481 IX86_BUILTIN_CMPNEQSS
,
25482 IX86_BUILTIN_CMPNLTSS
,
25483 IX86_BUILTIN_CMPNLESS
,
25484 IX86_BUILTIN_CMPNGTSS
,
25485 IX86_BUILTIN_CMPNGESS
,
25486 IX86_BUILTIN_CMPORDSS
,
25487 IX86_BUILTIN_CMPUNORDSS
,
25489 IX86_BUILTIN_COMIEQSS
,
25490 IX86_BUILTIN_COMILTSS
,
25491 IX86_BUILTIN_COMILESS
,
25492 IX86_BUILTIN_COMIGTSS
,
25493 IX86_BUILTIN_COMIGESS
,
25494 IX86_BUILTIN_COMINEQSS
,
25495 IX86_BUILTIN_UCOMIEQSS
,
25496 IX86_BUILTIN_UCOMILTSS
,
25497 IX86_BUILTIN_UCOMILESS
,
25498 IX86_BUILTIN_UCOMIGTSS
,
25499 IX86_BUILTIN_UCOMIGESS
,
25500 IX86_BUILTIN_UCOMINEQSS
,
25502 IX86_BUILTIN_CVTPI2PS
,
25503 IX86_BUILTIN_CVTPS2PI
,
25504 IX86_BUILTIN_CVTSI2SS
,
25505 IX86_BUILTIN_CVTSI642SS
,
25506 IX86_BUILTIN_CVTSS2SI
,
25507 IX86_BUILTIN_CVTSS2SI64
,
25508 IX86_BUILTIN_CVTTPS2PI
,
25509 IX86_BUILTIN_CVTTSS2SI
,
25510 IX86_BUILTIN_CVTTSS2SI64
,
25512 IX86_BUILTIN_MAXPS
,
25513 IX86_BUILTIN_MAXSS
,
25514 IX86_BUILTIN_MINPS
,
25515 IX86_BUILTIN_MINSS
,
25517 IX86_BUILTIN_LOADUPS
,
25518 IX86_BUILTIN_STOREUPS
,
25519 IX86_BUILTIN_MOVSS
,
25521 IX86_BUILTIN_MOVHLPS
,
25522 IX86_BUILTIN_MOVLHPS
,
25523 IX86_BUILTIN_LOADHPS
,
25524 IX86_BUILTIN_LOADLPS
,
25525 IX86_BUILTIN_STOREHPS
,
25526 IX86_BUILTIN_STORELPS
,
25528 IX86_BUILTIN_MASKMOVQ
,
25529 IX86_BUILTIN_MOVMSKPS
,
25530 IX86_BUILTIN_PMOVMSKB
,
25532 IX86_BUILTIN_MOVNTPS
,
25533 IX86_BUILTIN_MOVNTQ
,
25535 IX86_BUILTIN_LOADDQU
,
25536 IX86_BUILTIN_STOREDQU
,
25538 IX86_BUILTIN_PACKSSWB
,
25539 IX86_BUILTIN_PACKSSDW
,
25540 IX86_BUILTIN_PACKUSWB
,
25542 IX86_BUILTIN_PADDB
,
25543 IX86_BUILTIN_PADDW
,
25544 IX86_BUILTIN_PADDD
,
25545 IX86_BUILTIN_PADDQ
,
25546 IX86_BUILTIN_PADDSB
,
25547 IX86_BUILTIN_PADDSW
,
25548 IX86_BUILTIN_PADDUSB
,
25549 IX86_BUILTIN_PADDUSW
,
25550 IX86_BUILTIN_PSUBB
,
25551 IX86_BUILTIN_PSUBW
,
25552 IX86_BUILTIN_PSUBD
,
25553 IX86_BUILTIN_PSUBQ
,
25554 IX86_BUILTIN_PSUBSB
,
25555 IX86_BUILTIN_PSUBSW
,
25556 IX86_BUILTIN_PSUBUSB
,
25557 IX86_BUILTIN_PSUBUSW
,
25560 IX86_BUILTIN_PANDN
,
25564 IX86_BUILTIN_PAVGB
,
25565 IX86_BUILTIN_PAVGW
,
25567 IX86_BUILTIN_PCMPEQB
,
25568 IX86_BUILTIN_PCMPEQW
,
25569 IX86_BUILTIN_PCMPEQD
,
25570 IX86_BUILTIN_PCMPGTB
,
25571 IX86_BUILTIN_PCMPGTW
,
25572 IX86_BUILTIN_PCMPGTD
,
25574 IX86_BUILTIN_PMADDWD
,
25576 IX86_BUILTIN_PMAXSW
,
25577 IX86_BUILTIN_PMAXUB
,
25578 IX86_BUILTIN_PMINSW
,
25579 IX86_BUILTIN_PMINUB
,
25581 IX86_BUILTIN_PMULHUW
,
25582 IX86_BUILTIN_PMULHW
,
25583 IX86_BUILTIN_PMULLW
,
25585 IX86_BUILTIN_PSADBW
,
25586 IX86_BUILTIN_PSHUFW
,
25588 IX86_BUILTIN_PSLLW
,
25589 IX86_BUILTIN_PSLLD
,
25590 IX86_BUILTIN_PSLLQ
,
25591 IX86_BUILTIN_PSRAW
,
25592 IX86_BUILTIN_PSRAD
,
25593 IX86_BUILTIN_PSRLW
,
25594 IX86_BUILTIN_PSRLD
,
25595 IX86_BUILTIN_PSRLQ
,
25596 IX86_BUILTIN_PSLLWI
,
25597 IX86_BUILTIN_PSLLDI
,
25598 IX86_BUILTIN_PSLLQI
,
25599 IX86_BUILTIN_PSRAWI
,
25600 IX86_BUILTIN_PSRADI
,
25601 IX86_BUILTIN_PSRLWI
,
25602 IX86_BUILTIN_PSRLDI
,
25603 IX86_BUILTIN_PSRLQI
,
25605 IX86_BUILTIN_PUNPCKHBW
,
25606 IX86_BUILTIN_PUNPCKHWD
,
25607 IX86_BUILTIN_PUNPCKHDQ
,
25608 IX86_BUILTIN_PUNPCKLBW
,
25609 IX86_BUILTIN_PUNPCKLWD
,
25610 IX86_BUILTIN_PUNPCKLDQ
,
25612 IX86_BUILTIN_SHUFPS
,
25614 IX86_BUILTIN_RCPPS
,
25615 IX86_BUILTIN_RCPSS
,
25616 IX86_BUILTIN_RSQRTPS
,
25617 IX86_BUILTIN_RSQRTPS_NR
,
25618 IX86_BUILTIN_RSQRTSS
,
25619 IX86_BUILTIN_RSQRTF
,
25620 IX86_BUILTIN_SQRTPS
,
25621 IX86_BUILTIN_SQRTPS_NR
,
25622 IX86_BUILTIN_SQRTSS
,
25624 IX86_BUILTIN_UNPCKHPS
,
25625 IX86_BUILTIN_UNPCKLPS
,
25627 IX86_BUILTIN_ANDPS
,
25628 IX86_BUILTIN_ANDNPS
,
25630 IX86_BUILTIN_XORPS
,
25633 IX86_BUILTIN_LDMXCSR
,
25634 IX86_BUILTIN_STMXCSR
,
25635 IX86_BUILTIN_SFENCE
,
25637 IX86_BUILTIN_FXSAVE
,
25638 IX86_BUILTIN_FXRSTOR
,
25639 IX86_BUILTIN_FXSAVE64
,
25640 IX86_BUILTIN_FXRSTOR64
,
25642 IX86_BUILTIN_XSAVE
,
25643 IX86_BUILTIN_XRSTOR
,
25644 IX86_BUILTIN_XSAVE64
,
25645 IX86_BUILTIN_XRSTOR64
,
25647 IX86_BUILTIN_XSAVEOPT
,
25648 IX86_BUILTIN_XSAVEOPT64
,
25650 /* 3DNow! Original */
25651 IX86_BUILTIN_FEMMS
,
25652 IX86_BUILTIN_PAVGUSB
,
25653 IX86_BUILTIN_PF2ID
,
25654 IX86_BUILTIN_PFACC
,
25655 IX86_BUILTIN_PFADD
,
25656 IX86_BUILTIN_PFCMPEQ
,
25657 IX86_BUILTIN_PFCMPGE
,
25658 IX86_BUILTIN_PFCMPGT
,
25659 IX86_BUILTIN_PFMAX
,
25660 IX86_BUILTIN_PFMIN
,
25661 IX86_BUILTIN_PFMUL
,
25662 IX86_BUILTIN_PFRCP
,
25663 IX86_BUILTIN_PFRCPIT1
,
25664 IX86_BUILTIN_PFRCPIT2
,
25665 IX86_BUILTIN_PFRSQIT1
,
25666 IX86_BUILTIN_PFRSQRT
,
25667 IX86_BUILTIN_PFSUB
,
25668 IX86_BUILTIN_PFSUBR
,
25669 IX86_BUILTIN_PI2FD
,
25670 IX86_BUILTIN_PMULHRW
,
25672 /* 3DNow! Athlon Extensions */
25673 IX86_BUILTIN_PF2IW
,
25674 IX86_BUILTIN_PFNACC
,
25675 IX86_BUILTIN_PFPNACC
,
25676 IX86_BUILTIN_PI2FW
,
25677 IX86_BUILTIN_PSWAPDSI
,
25678 IX86_BUILTIN_PSWAPDSF
,
25681 IX86_BUILTIN_ADDPD
,
25682 IX86_BUILTIN_ADDSD
,
25683 IX86_BUILTIN_DIVPD
,
25684 IX86_BUILTIN_DIVSD
,
25685 IX86_BUILTIN_MULPD
,
25686 IX86_BUILTIN_MULSD
,
25687 IX86_BUILTIN_SUBPD
,
25688 IX86_BUILTIN_SUBSD
,
25690 IX86_BUILTIN_CMPEQPD
,
25691 IX86_BUILTIN_CMPLTPD
,
25692 IX86_BUILTIN_CMPLEPD
,
25693 IX86_BUILTIN_CMPGTPD
,
25694 IX86_BUILTIN_CMPGEPD
,
25695 IX86_BUILTIN_CMPNEQPD
,
25696 IX86_BUILTIN_CMPNLTPD
,
25697 IX86_BUILTIN_CMPNLEPD
,
25698 IX86_BUILTIN_CMPNGTPD
,
25699 IX86_BUILTIN_CMPNGEPD
,
25700 IX86_BUILTIN_CMPORDPD
,
25701 IX86_BUILTIN_CMPUNORDPD
,
25702 IX86_BUILTIN_CMPEQSD
,
25703 IX86_BUILTIN_CMPLTSD
,
25704 IX86_BUILTIN_CMPLESD
,
25705 IX86_BUILTIN_CMPNEQSD
,
25706 IX86_BUILTIN_CMPNLTSD
,
25707 IX86_BUILTIN_CMPNLESD
,
25708 IX86_BUILTIN_CMPORDSD
,
25709 IX86_BUILTIN_CMPUNORDSD
,
25711 IX86_BUILTIN_COMIEQSD
,
25712 IX86_BUILTIN_COMILTSD
,
25713 IX86_BUILTIN_COMILESD
,
25714 IX86_BUILTIN_COMIGTSD
,
25715 IX86_BUILTIN_COMIGESD
,
25716 IX86_BUILTIN_COMINEQSD
,
25717 IX86_BUILTIN_UCOMIEQSD
,
25718 IX86_BUILTIN_UCOMILTSD
,
25719 IX86_BUILTIN_UCOMILESD
,
25720 IX86_BUILTIN_UCOMIGTSD
,
25721 IX86_BUILTIN_UCOMIGESD
,
25722 IX86_BUILTIN_UCOMINEQSD
,
25724 IX86_BUILTIN_MAXPD
,
25725 IX86_BUILTIN_MAXSD
,
25726 IX86_BUILTIN_MINPD
,
25727 IX86_BUILTIN_MINSD
,
25729 IX86_BUILTIN_ANDPD
,
25730 IX86_BUILTIN_ANDNPD
,
25732 IX86_BUILTIN_XORPD
,
25734 IX86_BUILTIN_SQRTPD
,
25735 IX86_BUILTIN_SQRTSD
,
25737 IX86_BUILTIN_UNPCKHPD
,
25738 IX86_BUILTIN_UNPCKLPD
,
25740 IX86_BUILTIN_SHUFPD
,
25742 IX86_BUILTIN_LOADUPD
,
25743 IX86_BUILTIN_STOREUPD
,
25744 IX86_BUILTIN_MOVSD
,
25746 IX86_BUILTIN_LOADHPD
,
25747 IX86_BUILTIN_LOADLPD
,
25749 IX86_BUILTIN_CVTDQ2PD
,
25750 IX86_BUILTIN_CVTDQ2PS
,
25752 IX86_BUILTIN_CVTPD2DQ
,
25753 IX86_BUILTIN_CVTPD2PI
,
25754 IX86_BUILTIN_CVTPD2PS
,
25755 IX86_BUILTIN_CVTTPD2DQ
,
25756 IX86_BUILTIN_CVTTPD2PI
,
25758 IX86_BUILTIN_CVTPI2PD
,
25759 IX86_BUILTIN_CVTSI2SD
,
25760 IX86_BUILTIN_CVTSI642SD
,
25762 IX86_BUILTIN_CVTSD2SI
,
25763 IX86_BUILTIN_CVTSD2SI64
,
25764 IX86_BUILTIN_CVTSD2SS
,
25765 IX86_BUILTIN_CVTSS2SD
,
25766 IX86_BUILTIN_CVTTSD2SI
,
25767 IX86_BUILTIN_CVTTSD2SI64
,
25769 IX86_BUILTIN_CVTPS2DQ
,
25770 IX86_BUILTIN_CVTPS2PD
,
25771 IX86_BUILTIN_CVTTPS2DQ
,
25773 IX86_BUILTIN_MOVNTI
,
25774 IX86_BUILTIN_MOVNTI64
,
25775 IX86_BUILTIN_MOVNTPD
,
25776 IX86_BUILTIN_MOVNTDQ
,
25778 IX86_BUILTIN_MOVQ128
,
25781 IX86_BUILTIN_MASKMOVDQU
,
25782 IX86_BUILTIN_MOVMSKPD
,
25783 IX86_BUILTIN_PMOVMSKB128
,
25785 IX86_BUILTIN_PACKSSWB128
,
25786 IX86_BUILTIN_PACKSSDW128
,
25787 IX86_BUILTIN_PACKUSWB128
,
25789 IX86_BUILTIN_PADDB128
,
25790 IX86_BUILTIN_PADDW128
,
25791 IX86_BUILTIN_PADDD128
,
25792 IX86_BUILTIN_PADDQ128
,
25793 IX86_BUILTIN_PADDSB128
,
25794 IX86_BUILTIN_PADDSW128
,
25795 IX86_BUILTIN_PADDUSB128
,
25796 IX86_BUILTIN_PADDUSW128
,
25797 IX86_BUILTIN_PSUBB128
,
25798 IX86_BUILTIN_PSUBW128
,
25799 IX86_BUILTIN_PSUBD128
,
25800 IX86_BUILTIN_PSUBQ128
,
25801 IX86_BUILTIN_PSUBSB128
,
25802 IX86_BUILTIN_PSUBSW128
,
25803 IX86_BUILTIN_PSUBUSB128
,
25804 IX86_BUILTIN_PSUBUSW128
,
25806 IX86_BUILTIN_PAND128
,
25807 IX86_BUILTIN_PANDN128
,
25808 IX86_BUILTIN_POR128
,
25809 IX86_BUILTIN_PXOR128
,
25811 IX86_BUILTIN_PAVGB128
,
25812 IX86_BUILTIN_PAVGW128
,
25814 IX86_BUILTIN_PCMPEQB128
,
25815 IX86_BUILTIN_PCMPEQW128
,
25816 IX86_BUILTIN_PCMPEQD128
,
25817 IX86_BUILTIN_PCMPGTB128
,
25818 IX86_BUILTIN_PCMPGTW128
,
25819 IX86_BUILTIN_PCMPGTD128
,
25821 IX86_BUILTIN_PMADDWD128
,
25823 IX86_BUILTIN_PMAXSW128
,
25824 IX86_BUILTIN_PMAXUB128
,
25825 IX86_BUILTIN_PMINSW128
,
25826 IX86_BUILTIN_PMINUB128
,
25828 IX86_BUILTIN_PMULUDQ
,
25829 IX86_BUILTIN_PMULUDQ128
,
25830 IX86_BUILTIN_PMULHUW128
,
25831 IX86_BUILTIN_PMULHW128
,
25832 IX86_BUILTIN_PMULLW128
,
25834 IX86_BUILTIN_PSADBW128
,
25835 IX86_BUILTIN_PSHUFHW
,
25836 IX86_BUILTIN_PSHUFLW
,
25837 IX86_BUILTIN_PSHUFD
,
25839 IX86_BUILTIN_PSLLDQI128
,
25840 IX86_BUILTIN_PSLLWI128
,
25841 IX86_BUILTIN_PSLLDI128
,
25842 IX86_BUILTIN_PSLLQI128
,
25843 IX86_BUILTIN_PSRAWI128
,
25844 IX86_BUILTIN_PSRADI128
,
25845 IX86_BUILTIN_PSRLDQI128
,
25846 IX86_BUILTIN_PSRLWI128
,
25847 IX86_BUILTIN_PSRLDI128
,
25848 IX86_BUILTIN_PSRLQI128
,
25850 IX86_BUILTIN_PSLLDQ128
,
25851 IX86_BUILTIN_PSLLW128
,
25852 IX86_BUILTIN_PSLLD128
,
25853 IX86_BUILTIN_PSLLQ128
,
25854 IX86_BUILTIN_PSRAW128
,
25855 IX86_BUILTIN_PSRAD128
,
25856 IX86_BUILTIN_PSRLW128
,
25857 IX86_BUILTIN_PSRLD128
,
25858 IX86_BUILTIN_PSRLQ128
,
25860 IX86_BUILTIN_PUNPCKHBW128
,
25861 IX86_BUILTIN_PUNPCKHWD128
,
25862 IX86_BUILTIN_PUNPCKHDQ128
,
25863 IX86_BUILTIN_PUNPCKHQDQ128
,
25864 IX86_BUILTIN_PUNPCKLBW128
,
25865 IX86_BUILTIN_PUNPCKLWD128
,
25866 IX86_BUILTIN_PUNPCKLDQ128
,
25867 IX86_BUILTIN_PUNPCKLQDQ128
,
25869 IX86_BUILTIN_CLFLUSH
,
25870 IX86_BUILTIN_MFENCE
,
25871 IX86_BUILTIN_LFENCE
,
25872 IX86_BUILTIN_PAUSE
,
25874 IX86_BUILTIN_BSRSI
,
25875 IX86_BUILTIN_BSRDI
,
25876 IX86_BUILTIN_RDPMC
,
25877 IX86_BUILTIN_RDTSC
,
25878 IX86_BUILTIN_RDTSCP
,
25879 IX86_BUILTIN_ROLQI
,
25880 IX86_BUILTIN_ROLHI
,
25881 IX86_BUILTIN_RORQI
,
25882 IX86_BUILTIN_RORHI
,
25885 IX86_BUILTIN_ADDSUBPS
,
25886 IX86_BUILTIN_HADDPS
,
25887 IX86_BUILTIN_HSUBPS
,
25888 IX86_BUILTIN_MOVSHDUP
,
25889 IX86_BUILTIN_MOVSLDUP
,
25890 IX86_BUILTIN_ADDSUBPD
,
25891 IX86_BUILTIN_HADDPD
,
25892 IX86_BUILTIN_HSUBPD
,
25893 IX86_BUILTIN_LDDQU
,
25895 IX86_BUILTIN_MONITOR
,
25896 IX86_BUILTIN_MWAIT
,
25899 IX86_BUILTIN_PHADDW
,
25900 IX86_BUILTIN_PHADDD
,
25901 IX86_BUILTIN_PHADDSW
,
25902 IX86_BUILTIN_PHSUBW
,
25903 IX86_BUILTIN_PHSUBD
,
25904 IX86_BUILTIN_PHSUBSW
,
25905 IX86_BUILTIN_PMADDUBSW
,
25906 IX86_BUILTIN_PMULHRSW
,
25907 IX86_BUILTIN_PSHUFB
,
25908 IX86_BUILTIN_PSIGNB
,
25909 IX86_BUILTIN_PSIGNW
,
25910 IX86_BUILTIN_PSIGND
,
25911 IX86_BUILTIN_PALIGNR
,
25912 IX86_BUILTIN_PABSB
,
25913 IX86_BUILTIN_PABSW
,
25914 IX86_BUILTIN_PABSD
,
25916 IX86_BUILTIN_PHADDW128
,
25917 IX86_BUILTIN_PHADDD128
,
25918 IX86_BUILTIN_PHADDSW128
,
25919 IX86_BUILTIN_PHSUBW128
,
25920 IX86_BUILTIN_PHSUBD128
,
25921 IX86_BUILTIN_PHSUBSW128
,
25922 IX86_BUILTIN_PMADDUBSW128
,
25923 IX86_BUILTIN_PMULHRSW128
,
25924 IX86_BUILTIN_PSHUFB128
,
25925 IX86_BUILTIN_PSIGNB128
,
25926 IX86_BUILTIN_PSIGNW128
,
25927 IX86_BUILTIN_PSIGND128
,
25928 IX86_BUILTIN_PALIGNR128
,
25929 IX86_BUILTIN_PABSB128
,
25930 IX86_BUILTIN_PABSW128
,
25931 IX86_BUILTIN_PABSD128
,
25933 /* AMDFAM10 - SSE4A New Instructions. */
25934 IX86_BUILTIN_MOVNTSD
,
25935 IX86_BUILTIN_MOVNTSS
,
25936 IX86_BUILTIN_EXTRQI
,
25937 IX86_BUILTIN_EXTRQ
,
25938 IX86_BUILTIN_INSERTQI
,
25939 IX86_BUILTIN_INSERTQ
,
25942 IX86_BUILTIN_BLENDPD
,
25943 IX86_BUILTIN_BLENDPS
,
25944 IX86_BUILTIN_BLENDVPD
,
25945 IX86_BUILTIN_BLENDVPS
,
25946 IX86_BUILTIN_PBLENDVB128
,
25947 IX86_BUILTIN_PBLENDW128
,
25952 IX86_BUILTIN_INSERTPS128
,
25954 IX86_BUILTIN_MOVNTDQA
,
25955 IX86_BUILTIN_MPSADBW128
,
25956 IX86_BUILTIN_PACKUSDW128
,
25957 IX86_BUILTIN_PCMPEQQ
,
25958 IX86_BUILTIN_PHMINPOSUW128
,
25960 IX86_BUILTIN_PMAXSB128
,
25961 IX86_BUILTIN_PMAXSD128
,
25962 IX86_BUILTIN_PMAXUD128
,
25963 IX86_BUILTIN_PMAXUW128
,
25965 IX86_BUILTIN_PMINSB128
,
25966 IX86_BUILTIN_PMINSD128
,
25967 IX86_BUILTIN_PMINUD128
,
25968 IX86_BUILTIN_PMINUW128
,
25970 IX86_BUILTIN_PMOVSXBW128
,
25971 IX86_BUILTIN_PMOVSXBD128
,
25972 IX86_BUILTIN_PMOVSXBQ128
,
25973 IX86_BUILTIN_PMOVSXWD128
,
25974 IX86_BUILTIN_PMOVSXWQ128
,
25975 IX86_BUILTIN_PMOVSXDQ128
,
25977 IX86_BUILTIN_PMOVZXBW128
,
25978 IX86_BUILTIN_PMOVZXBD128
,
25979 IX86_BUILTIN_PMOVZXBQ128
,
25980 IX86_BUILTIN_PMOVZXWD128
,
25981 IX86_BUILTIN_PMOVZXWQ128
,
25982 IX86_BUILTIN_PMOVZXDQ128
,
25984 IX86_BUILTIN_PMULDQ128
,
25985 IX86_BUILTIN_PMULLD128
,
25987 IX86_BUILTIN_ROUNDSD
,
25988 IX86_BUILTIN_ROUNDSS
,
25990 IX86_BUILTIN_ROUNDPD
,
25991 IX86_BUILTIN_ROUNDPS
,
25993 IX86_BUILTIN_FLOORPD
,
25994 IX86_BUILTIN_CEILPD
,
25995 IX86_BUILTIN_TRUNCPD
,
25996 IX86_BUILTIN_RINTPD
,
25997 IX86_BUILTIN_ROUNDPD_AZ
,
25999 IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX
,
26000 IX86_BUILTIN_CEILPD_VEC_PACK_SFIX
,
26001 IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX
,
26003 IX86_BUILTIN_FLOORPS
,
26004 IX86_BUILTIN_CEILPS
,
26005 IX86_BUILTIN_TRUNCPS
,
26006 IX86_BUILTIN_RINTPS
,
26007 IX86_BUILTIN_ROUNDPS_AZ
,
26009 IX86_BUILTIN_FLOORPS_SFIX
,
26010 IX86_BUILTIN_CEILPS_SFIX
,
26011 IX86_BUILTIN_ROUNDPS_AZ_SFIX
,
26013 IX86_BUILTIN_PTESTZ
,
26014 IX86_BUILTIN_PTESTC
,
26015 IX86_BUILTIN_PTESTNZC
,
26017 IX86_BUILTIN_VEC_INIT_V2SI
,
26018 IX86_BUILTIN_VEC_INIT_V4HI
,
26019 IX86_BUILTIN_VEC_INIT_V8QI
,
26020 IX86_BUILTIN_VEC_EXT_V2DF
,
26021 IX86_BUILTIN_VEC_EXT_V2DI
,
26022 IX86_BUILTIN_VEC_EXT_V4SF
,
26023 IX86_BUILTIN_VEC_EXT_V4SI
,
26024 IX86_BUILTIN_VEC_EXT_V8HI
,
26025 IX86_BUILTIN_VEC_EXT_V2SI
,
26026 IX86_BUILTIN_VEC_EXT_V4HI
,
26027 IX86_BUILTIN_VEC_EXT_V16QI
,
26028 IX86_BUILTIN_VEC_SET_V2DI
,
26029 IX86_BUILTIN_VEC_SET_V4SF
,
26030 IX86_BUILTIN_VEC_SET_V4SI
,
26031 IX86_BUILTIN_VEC_SET_V8HI
,
26032 IX86_BUILTIN_VEC_SET_V4HI
,
26033 IX86_BUILTIN_VEC_SET_V16QI
,
26035 IX86_BUILTIN_VEC_PACK_SFIX
,
26036 IX86_BUILTIN_VEC_PACK_SFIX256
,
26039 IX86_BUILTIN_CRC32QI
,
26040 IX86_BUILTIN_CRC32HI
,
26041 IX86_BUILTIN_CRC32SI
,
26042 IX86_BUILTIN_CRC32DI
,
26044 IX86_BUILTIN_PCMPESTRI128
,
26045 IX86_BUILTIN_PCMPESTRM128
,
26046 IX86_BUILTIN_PCMPESTRA128
,
26047 IX86_BUILTIN_PCMPESTRC128
,
26048 IX86_BUILTIN_PCMPESTRO128
,
26049 IX86_BUILTIN_PCMPESTRS128
,
26050 IX86_BUILTIN_PCMPESTRZ128
,
26051 IX86_BUILTIN_PCMPISTRI128
,
26052 IX86_BUILTIN_PCMPISTRM128
,
26053 IX86_BUILTIN_PCMPISTRA128
,
26054 IX86_BUILTIN_PCMPISTRC128
,
26055 IX86_BUILTIN_PCMPISTRO128
,
26056 IX86_BUILTIN_PCMPISTRS128
,
26057 IX86_BUILTIN_PCMPISTRZ128
,
26059 IX86_BUILTIN_PCMPGTQ
,
26061 /* AES instructions */
26062 IX86_BUILTIN_AESENC128
,
26063 IX86_BUILTIN_AESENCLAST128
,
26064 IX86_BUILTIN_AESDEC128
,
26065 IX86_BUILTIN_AESDECLAST128
,
26066 IX86_BUILTIN_AESIMC128
,
26067 IX86_BUILTIN_AESKEYGENASSIST128
,
26069 /* PCLMUL instruction */
26070 IX86_BUILTIN_PCLMULQDQ128
,
26073 IX86_BUILTIN_ADDPD256
,
26074 IX86_BUILTIN_ADDPS256
,
26075 IX86_BUILTIN_ADDSUBPD256
,
26076 IX86_BUILTIN_ADDSUBPS256
,
26077 IX86_BUILTIN_ANDPD256
,
26078 IX86_BUILTIN_ANDPS256
,
26079 IX86_BUILTIN_ANDNPD256
,
26080 IX86_BUILTIN_ANDNPS256
,
26081 IX86_BUILTIN_BLENDPD256
,
26082 IX86_BUILTIN_BLENDPS256
,
26083 IX86_BUILTIN_BLENDVPD256
,
26084 IX86_BUILTIN_BLENDVPS256
,
26085 IX86_BUILTIN_DIVPD256
,
26086 IX86_BUILTIN_DIVPS256
,
26087 IX86_BUILTIN_DPPS256
,
26088 IX86_BUILTIN_HADDPD256
,
26089 IX86_BUILTIN_HADDPS256
,
26090 IX86_BUILTIN_HSUBPD256
,
26091 IX86_BUILTIN_HSUBPS256
,
26092 IX86_BUILTIN_MAXPD256
,
26093 IX86_BUILTIN_MAXPS256
,
26094 IX86_BUILTIN_MINPD256
,
26095 IX86_BUILTIN_MINPS256
,
26096 IX86_BUILTIN_MULPD256
,
26097 IX86_BUILTIN_MULPS256
,
26098 IX86_BUILTIN_ORPD256
,
26099 IX86_BUILTIN_ORPS256
,
26100 IX86_BUILTIN_SHUFPD256
,
26101 IX86_BUILTIN_SHUFPS256
,
26102 IX86_BUILTIN_SUBPD256
,
26103 IX86_BUILTIN_SUBPS256
,
26104 IX86_BUILTIN_XORPD256
,
26105 IX86_BUILTIN_XORPS256
,
26106 IX86_BUILTIN_CMPSD
,
26107 IX86_BUILTIN_CMPSS
,
26108 IX86_BUILTIN_CMPPD
,
26109 IX86_BUILTIN_CMPPS
,
26110 IX86_BUILTIN_CMPPD256
,
26111 IX86_BUILTIN_CMPPS256
,
26112 IX86_BUILTIN_CVTDQ2PD256
,
26113 IX86_BUILTIN_CVTDQ2PS256
,
26114 IX86_BUILTIN_CVTPD2PS256
,
26115 IX86_BUILTIN_CVTPS2DQ256
,
26116 IX86_BUILTIN_CVTPS2PD256
,
26117 IX86_BUILTIN_CVTTPD2DQ256
,
26118 IX86_BUILTIN_CVTPD2DQ256
,
26119 IX86_BUILTIN_CVTTPS2DQ256
,
26120 IX86_BUILTIN_EXTRACTF128PD256
,
26121 IX86_BUILTIN_EXTRACTF128PS256
,
26122 IX86_BUILTIN_EXTRACTF128SI256
,
26123 IX86_BUILTIN_VZEROALL
,
26124 IX86_BUILTIN_VZEROUPPER
,
26125 IX86_BUILTIN_VPERMILVARPD
,
26126 IX86_BUILTIN_VPERMILVARPS
,
26127 IX86_BUILTIN_VPERMILVARPD256
,
26128 IX86_BUILTIN_VPERMILVARPS256
,
26129 IX86_BUILTIN_VPERMILPD
,
26130 IX86_BUILTIN_VPERMILPS
,
26131 IX86_BUILTIN_VPERMILPD256
,
26132 IX86_BUILTIN_VPERMILPS256
,
26133 IX86_BUILTIN_VPERMIL2PD
,
26134 IX86_BUILTIN_VPERMIL2PS
,
26135 IX86_BUILTIN_VPERMIL2PD256
,
26136 IX86_BUILTIN_VPERMIL2PS256
,
26137 IX86_BUILTIN_VPERM2F128PD256
,
26138 IX86_BUILTIN_VPERM2F128PS256
,
26139 IX86_BUILTIN_VPERM2F128SI256
,
26140 IX86_BUILTIN_VBROADCASTSS
,
26141 IX86_BUILTIN_VBROADCASTSD256
,
26142 IX86_BUILTIN_VBROADCASTSS256
,
26143 IX86_BUILTIN_VBROADCASTPD256
,
26144 IX86_BUILTIN_VBROADCASTPS256
,
26145 IX86_BUILTIN_VINSERTF128PD256
,
26146 IX86_BUILTIN_VINSERTF128PS256
,
26147 IX86_BUILTIN_VINSERTF128SI256
,
26148 IX86_BUILTIN_LOADUPD256
,
26149 IX86_BUILTIN_LOADUPS256
,
26150 IX86_BUILTIN_STOREUPD256
,
26151 IX86_BUILTIN_STOREUPS256
,
26152 IX86_BUILTIN_LDDQU256
,
26153 IX86_BUILTIN_MOVNTDQ256
,
26154 IX86_BUILTIN_MOVNTPD256
,
26155 IX86_BUILTIN_MOVNTPS256
,
26156 IX86_BUILTIN_LOADDQU256
,
26157 IX86_BUILTIN_STOREDQU256
,
26158 IX86_BUILTIN_MASKLOADPD
,
26159 IX86_BUILTIN_MASKLOADPS
,
26160 IX86_BUILTIN_MASKSTOREPD
,
26161 IX86_BUILTIN_MASKSTOREPS
,
26162 IX86_BUILTIN_MASKLOADPD256
,
26163 IX86_BUILTIN_MASKLOADPS256
,
26164 IX86_BUILTIN_MASKSTOREPD256
,
26165 IX86_BUILTIN_MASKSTOREPS256
,
26166 IX86_BUILTIN_MOVSHDUP256
,
26167 IX86_BUILTIN_MOVSLDUP256
,
26168 IX86_BUILTIN_MOVDDUP256
,
26170 IX86_BUILTIN_SQRTPD256
,
26171 IX86_BUILTIN_SQRTPS256
,
26172 IX86_BUILTIN_SQRTPS_NR256
,
26173 IX86_BUILTIN_RSQRTPS256
,
26174 IX86_BUILTIN_RSQRTPS_NR256
,
26176 IX86_BUILTIN_RCPPS256
,
26178 IX86_BUILTIN_ROUNDPD256
,
26179 IX86_BUILTIN_ROUNDPS256
,
26181 IX86_BUILTIN_FLOORPD256
,
26182 IX86_BUILTIN_CEILPD256
,
26183 IX86_BUILTIN_TRUNCPD256
,
26184 IX86_BUILTIN_RINTPD256
,
26185 IX86_BUILTIN_ROUNDPD_AZ256
,
26187 IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256
,
26188 IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256
,
26189 IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256
,
26191 IX86_BUILTIN_FLOORPS256
,
26192 IX86_BUILTIN_CEILPS256
,
26193 IX86_BUILTIN_TRUNCPS256
,
26194 IX86_BUILTIN_RINTPS256
,
26195 IX86_BUILTIN_ROUNDPS_AZ256
,
26197 IX86_BUILTIN_FLOORPS_SFIX256
,
26198 IX86_BUILTIN_CEILPS_SFIX256
,
26199 IX86_BUILTIN_ROUNDPS_AZ_SFIX256
,
26201 IX86_BUILTIN_UNPCKHPD256
,
26202 IX86_BUILTIN_UNPCKLPD256
,
26203 IX86_BUILTIN_UNPCKHPS256
,
26204 IX86_BUILTIN_UNPCKLPS256
,
26206 IX86_BUILTIN_SI256_SI
,
26207 IX86_BUILTIN_PS256_PS
,
26208 IX86_BUILTIN_PD256_PD
,
26209 IX86_BUILTIN_SI_SI256
,
26210 IX86_BUILTIN_PS_PS256
,
26211 IX86_BUILTIN_PD_PD256
,
26213 IX86_BUILTIN_VTESTZPD
,
26214 IX86_BUILTIN_VTESTCPD
,
26215 IX86_BUILTIN_VTESTNZCPD
,
26216 IX86_BUILTIN_VTESTZPS
,
26217 IX86_BUILTIN_VTESTCPS
,
26218 IX86_BUILTIN_VTESTNZCPS
,
26219 IX86_BUILTIN_VTESTZPD256
,
26220 IX86_BUILTIN_VTESTCPD256
,
26221 IX86_BUILTIN_VTESTNZCPD256
,
26222 IX86_BUILTIN_VTESTZPS256
,
26223 IX86_BUILTIN_VTESTCPS256
,
26224 IX86_BUILTIN_VTESTNZCPS256
,
26225 IX86_BUILTIN_PTESTZ256
,
26226 IX86_BUILTIN_PTESTC256
,
26227 IX86_BUILTIN_PTESTNZC256
,
26229 IX86_BUILTIN_MOVMSKPD256
,
26230 IX86_BUILTIN_MOVMSKPS256
,
26233 IX86_BUILTIN_MPSADBW256
,
26234 IX86_BUILTIN_PABSB256
,
26235 IX86_BUILTIN_PABSW256
,
26236 IX86_BUILTIN_PABSD256
,
26237 IX86_BUILTIN_PACKSSDW256
,
26238 IX86_BUILTIN_PACKSSWB256
,
26239 IX86_BUILTIN_PACKUSDW256
,
26240 IX86_BUILTIN_PACKUSWB256
,
26241 IX86_BUILTIN_PADDB256
,
26242 IX86_BUILTIN_PADDW256
,
26243 IX86_BUILTIN_PADDD256
,
26244 IX86_BUILTIN_PADDQ256
,
26245 IX86_BUILTIN_PADDSB256
,
26246 IX86_BUILTIN_PADDSW256
,
26247 IX86_BUILTIN_PADDUSB256
,
26248 IX86_BUILTIN_PADDUSW256
,
26249 IX86_BUILTIN_PALIGNR256
,
26250 IX86_BUILTIN_AND256I
,
26251 IX86_BUILTIN_ANDNOT256I
,
26252 IX86_BUILTIN_PAVGB256
,
26253 IX86_BUILTIN_PAVGW256
,
26254 IX86_BUILTIN_PBLENDVB256
,
26255 IX86_BUILTIN_PBLENDVW256
,
26256 IX86_BUILTIN_PCMPEQB256
,
26257 IX86_BUILTIN_PCMPEQW256
,
26258 IX86_BUILTIN_PCMPEQD256
,
26259 IX86_BUILTIN_PCMPEQQ256
,
26260 IX86_BUILTIN_PCMPGTB256
,
26261 IX86_BUILTIN_PCMPGTW256
,
26262 IX86_BUILTIN_PCMPGTD256
,
26263 IX86_BUILTIN_PCMPGTQ256
,
26264 IX86_BUILTIN_PHADDW256
,
26265 IX86_BUILTIN_PHADDD256
,
26266 IX86_BUILTIN_PHADDSW256
,
26267 IX86_BUILTIN_PHSUBW256
,
26268 IX86_BUILTIN_PHSUBD256
,
26269 IX86_BUILTIN_PHSUBSW256
,
26270 IX86_BUILTIN_PMADDUBSW256
,
26271 IX86_BUILTIN_PMADDWD256
,
26272 IX86_BUILTIN_PMAXSB256
,
26273 IX86_BUILTIN_PMAXSW256
,
26274 IX86_BUILTIN_PMAXSD256
,
26275 IX86_BUILTIN_PMAXUB256
,
26276 IX86_BUILTIN_PMAXUW256
,
26277 IX86_BUILTIN_PMAXUD256
,
26278 IX86_BUILTIN_PMINSB256
,
26279 IX86_BUILTIN_PMINSW256
,
26280 IX86_BUILTIN_PMINSD256
,
26281 IX86_BUILTIN_PMINUB256
,
26282 IX86_BUILTIN_PMINUW256
,
26283 IX86_BUILTIN_PMINUD256
,
26284 IX86_BUILTIN_PMOVMSKB256
,
26285 IX86_BUILTIN_PMOVSXBW256
,
26286 IX86_BUILTIN_PMOVSXBD256
,
26287 IX86_BUILTIN_PMOVSXBQ256
,
26288 IX86_BUILTIN_PMOVSXWD256
,
26289 IX86_BUILTIN_PMOVSXWQ256
,
26290 IX86_BUILTIN_PMOVSXDQ256
,
26291 IX86_BUILTIN_PMOVZXBW256
,
26292 IX86_BUILTIN_PMOVZXBD256
,
26293 IX86_BUILTIN_PMOVZXBQ256
,
26294 IX86_BUILTIN_PMOVZXWD256
,
26295 IX86_BUILTIN_PMOVZXWQ256
,
26296 IX86_BUILTIN_PMOVZXDQ256
,
26297 IX86_BUILTIN_PMULDQ256
,
26298 IX86_BUILTIN_PMULHRSW256
,
26299 IX86_BUILTIN_PMULHUW256
,
26300 IX86_BUILTIN_PMULHW256
,
26301 IX86_BUILTIN_PMULLW256
,
26302 IX86_BUILTIN_PMULLD256
,
26303 IX86_BUILTIN_PMULUDQ256
,
26304 IX86_BUILTIN_POR256
,
26305 IX86_BUILTIN_PSADBW256
,
26306 IX86_BUILTIN_PSHUFB256
,
26307 IX86_BUILTIN_PSHUFD256
,
26308 IX86_BUILTIN_PSHUFHW256
,
26309 IX86_BUILTIN_PSHUFLW256
,
26310 IX86_BUILTIN_PSIGNB256
,
26311 IX86_BUILTIN_PSIGNW256
,
26312 IX86_BUILTIN_PSIGND256
,
26313 IX86_BUILTIN_PSLLDQI256
,
26314 IX86_BUILTIN_PSLLWI256
,
26315 IX86_BUILTIN_PSLLW256
,
26316 IX86_BUILTIN_PSLLDI256
,
26317 IX86_BUILTIN_PSLLD256
,
26318 IX86_BUILTIN_PSLLQI256
,
26319 IX86_BUILTIN_PSLLQ256
,
26320 IX86_BUILTIN_PSRAWI256
,
26321 IX86_BUILTIN_PSRAW256
,
26322 IX86_BUILTIN_PSRADI256
,
26323 IX86_BUILTIN_PSRAD256
,
26324 IX86_BUILTIN_PSRLDQI256
,
26325 IX86_BUILTIN_PSRLWI256
,
26326 IX86_BUILTIN_PSRLW256
,
26327 IX86_BUILTIN_PSRLDI256
,
26328 IX86_BUILTIN_PSRLD256
,
26329 IX86_BUILTIN_PSRLQI256
,
26330 IX86_BUILTIN_PSRLQ256
,
26331 IX86_BUILTIN_PSUBB256
,
26332 IX86_BUILTIN_PSUBW256
,
26333 IX86_BUILTIN_PSUBD256
,
26334 IX86_BUILTIN_PSUBQ256
,
26335 IX86_BUILTIN_PSUBSB256
,
26336 IX86_BUILTIN_PSUBSW256
,
26337 IX86_BUILTIN_PSUBUSB256
,
26338 IX86_BUILTIN_PSUBUSW256
,
26339 IX86_BUILTIN_PUNPCKHBW256
,
26340 IX86_BUILTIN_PUNPCKHWD256
,
26341 IX86_BUILTIN_PUNPCKHDQ256
,
26342 IX86_BUILTIN_PUNPCKHQDQ256
,
26343 IX86_BUILTIN_PUNPCKLBW256
,
26344 IX86_BUILTIN_PUNPCKLWD256
,
26345 IX86_BUILTIN_PUNPCKLDQ256
,
26346 IX86_BUILTIN_PUNPCKLQDQ256
,
26347 IX86_BUILTIN_PXOR256
,
26348 IX86_BUILTIN_MOVNTDQA256
,
26349 IX86_BUILTIN_VBROADCASTSS_PS
,
26350 IX86_BUILTIN_VBROADCASTSS_PS256
,
26351 IX86_BUILTIN_VBROADCASTSD_PD256
,
26352 IX86_BUILTIN_VBROADCASTSI256
,
26353 IX86_BUILTIN_PBLENDD256
,
26354 IX86_BUILTIN_PBLENDD128
,
26355 IX86_BUILTIN_PBROADCASTB256
,
26356 IX86_BUILTIN_PBROADCASTW256
,
26357 IX86_BUILTIN_PBROADCASTD256
,
26358 IX86_BUILTIN_PBROADCASTQ256
,
26359 IX86_BUILTIN_PBROADCASTB128
,
26360 IX86_BUILTIN_PBROADCASTW128
,
26361 IX86_BUILTIN_PBROADCASTD128
,
26362 IX86_BUILTIN_PBROADCASTQ128
,
26363 IX86_BUILTIN_VPERMVARSI256
,
26364 IX86_BUILTIN_VPERMDF256
,
26365 IX86_BUILTIN_VPERMVARSF256
,
26366 IX86_BUILTIN_VPERMDI256
,
26367 IX86_BUILTIN_VPERMTI256
,
26368 IX86_BUILTIN_VEXTRACT128I256
,
26369 IX86_BUILTIN_VINSERT128I256
,
26370 IX86_BUILTIN_MASKLOADD
,
26371 IX86_BUILTIN_MASKLOADQ
,
26372 IX86_BUILTIN_MASKLOADD256
,
26373 IX86_BUILTIN_MASKLOADQ256
,
26374 IX86_BUILTIN_MASKSTORED
,
26375 IX86_BUILTIN_MASKSTOREQ
,
26376 IX86_BUILTIN_MASKSTORED256
,
26377 IX86_BUILTIN_MASKSTOREQ256
,
26378 IX86_BUILTIN_PSLLVV4DI
,
26379 IX86_BUILTIN_PSLLVV2DI
,
26380 IX86_BUILTIN_PSLLVV8SI
,
26381 IX86_BUILTIN_PSLLVV4SI
,
26382 IX86_BUILTIN_PSRAVV8SI
,
26383 IX86_BUILTIN_PSRAVV4SI
,
26384 IX86_BUILTIN_PSRLVV4DI
,
26385 IX86_BUILTIN_PSRLVV2DI
,
26386 IX86_BUILTIN_PSRLVV8SI
,
26387 IX86_BUILTIN_PSRLVV4SI
,
26389 IX86_BUILTIN_GATHERSIV2DF
,
26390 IX86_BUILTIN_GATHERSIV4DF
,
26391 IX86_BUILTIN_GATHERDIV2DF
,
26392 IX86_BUILTIN_GATHERDIV4DF
,
26393 IX86_BUILTIN_GATHERSIV4SF
,
26394 IX86_BUILTIN_GATHERSIV8SF
,
26395 IX86_BUILTIN_GATHERDIV4SF
,
26396 IX86_BUILTIN_GATHERDIV8SF
,
26397 IX86_BUILTIN_GATHERSIV2DI
,
26398 IX86_BUILTIN_GATHERSIV4DI
,
26399 IX86_BUILTIN_GATHERDIV2DI
,
26400 IX86_BUILTIN_GATHERDIV4DI
,
26401 IX86_BUILTIN_GATHERSIV4SI
,
26402 IX86_BUILTIN_GATHERSIV8SI
,
26403 IX86_BUILTIN_GATHERDIV4SI
,
26404 IX86_BUILTIN_GATHERDIV8SI
,
26406 /* Alternate 4 element gather for the vectorizer where
26407 all operands are 32-byte wide. */
26408 IX86_BUILTIN_GATHERALTSIV4DF
,
26409 IX86_BUILTIN_GATHERALTDIV8SF
,
26410 IX86_BUILTIN_GATHERALTSIV4DI
,
26411 IX86_BUILTIN_GATHERALTDIV8SI
,
26413 /* TFmode support builtins. */
26415 IX86_BUILTIN_HUGE_VALQ
,
26416 IX86_BUILTIN_FABSQ
,
26417 IX86_BUILTIN_COPYSIGNQ
,
26419 /* Vectorizer support builtins. */
26420 IX86_BUILTIN_CPYSGNPS
,
26421 IX86_BUILTIN_CPYSGNPD
,
26422 IX86_BUILTIN_CPYSGNPS256
,
26423 IX86_BUILTIN_CPYSGNPD256
,
26425 /* FMA4 instructions. */
26426 IX86_BUILTIN_VFMADDSS
,
26427 IX86_BUILTIN_VFMADDSD
,
26428 IX86_BUILTIN_VFMADDPS
,
26429 IX86_BUILTIN_VFMADDPD
,
26430 IX86_BUILTIN_VFMADDPS256
,
26431 IX86_BUILTIN_VFMADDPD256
,
26432 IX86_BUILTIN_VFMADDSUBPS
,
26433 IX86_BUILTIN_VFMADDSUBPD
,
26434 IX86_BUILTIN_VFMADDSUBPS256
,
26435 IX86_BUILTIN_VFMADDSUBPD256
,
26437 /* FMA3 instructions. */
26438 IX86_BUILTIN_VFMADDSS3
,
26439 IX86_BUILTIN_VFMADDSD3
,
26441 /* XOP instructions. */
26442 IX86_BUILTIN_VPCMOV
,
26443 IX86_BUILTIN_VPCMOV_V2DI
,
26444 IX86_BUILTIN_VPCMOV_V4SI
,
26445 IX86_BUILTIN_VPCMOV_V8HI
,
26446 IX86_BUILTIN_VPCMOV_V16QI
,
26447 IX86_BUILTIN_VPCMOV_V4SF
,
26448 IX86_BUILTIN_VPCMOV_V2DF
,
26449 IX86_BUILTIN_VPCMOV256
,
26450 IX86_BUILTIN_VPCMOV_V4DI256
,
26451 IX86_BUILTIN_VPCMOV_V8SI256
,
26452 IX86_BUILTIN_VPCMOV_V16HI256
,
26453 IX86_BUILTIN_VPCMOV_V32QI256
,
26454 IX86_BUILTIN_VPCMOV_V8SF256
,
26455 IX86_BUILTIN_VPCMOV_V4DF256
,
26457 IX86_BUILTIN_VPPERM
,
26459 IX86_BUILTIN_VPMACSSWW
,
26460 IX86_BUILTIN_VPMACSWW
,
26461 IX86_BUILTIN_VPMACSSWD
,
26462 IX86_BUILTIN_VPMACSWD
,
26463 IX86_BUILTIN_VPMACSSDD
,
26464 IX86_BUILTIN_VPMACSDD
,
26465 IX86_BUILTIN_VPMACSSDQL
,
26466 IX86_BUILTIN_VPMACSSDQH
,
26467 IX86_BUILTIN_VPMACSDQL
,
26468 IX86_BUILTIN_VPMACSDQH
,
26469 IX86_BUILTIN_VPMADCSSWD
,
26470 IX86_BUILTIN_VPMADCSWD
,
26472 IX86_BUILTIN_VPHADDBW
,
26473 IX86_BUILTIN_VPHADDBD
,
26474 IX86_BUILTIN_VPHADDBQ
,
26475 IX86_BUILTIN_VPHADDWD
,
26476 IX86_BUILTIN_VPHADDWQ
,
26477 IX86_BUILTIN_VPHADDDQ
,
26478 IX86_BUILTIN_VPHADDUBW
,
26479 IX86_BUILTIN_VPHADDUBD
,
26480 IX86_BUILTIN_VPHADDUBQ
,
26481 IX86_BUILTIN_VPHADDUWD
,
26482 IX86_BUILTIN_VPHADDUWQ
,
26483 IX86_BUILTIN_VPHADDUDQ
,
26484 IX86_BUILTIN_VPHSUBBW
,
26485 IX86_BUILTIN_VPHSUBWD
,
26486 IX86_BUILTIN_VPHSUBDQ
,
26488 IX86_BUILTIN_VPROTB
,
26489 IX86_BUILTIN_VPROTW
,
26490 IX86_BUILTIN_VPROTD
,
26491 IX86_BUILTIN_VPROTQ
,
26492 IX86_BUILTIN_VPROTB_IMM
,
26493 IX86_BUILTIN_VPROTW_IMM
,
26494 IX86_BUILTIN_VPROTD_IMM
,
26495 IX86_BUILTIN_VPROTQ_IMM
,
26497 IX86_BUILTIN_VPSHLB
,
26498 IX86_BUILTIN_VPSHLW
,
26499 IX86_BUILTIN_VPSHLD
,
26500 IX86_BUILTIN_VPSHLQ
,
26501 IX86_BUILTIN_VPSHAB
,
26502 IX86_BUILTIN_VPSHAW
,
26503 IX86_BUILTIN_VPSHAD
,
26504 IX86_BUILTIN_VPSHAQ
,
26506 IX86_BUILTIN_VFRCZSS
,
26507 IX86_BUILTIN_VFRCZSD
,
26508 IX86_BUILTIN_VFRCZPS
,
26509 IX86_BUILTIN_VFRCZPD
,
26510 IX86_BUILTIN_VFRCZPS256
,
26511 IX86_BUILTIN_VFRCZPD256
,
26513 IX86_BUILTIN_VPCOMEQUB
,
26514 IX86_BUILTIN_VPCOMNEUB
,
26515 IX86_BUILTIN_VPCOMLTUB
,
26516 IX86_BUILTIN_VPCOMLEUB
,
26517 IX86_BUILTIN_VPCOMGTUB
,
26518 IX86_BUILTIN_VPCOMGEUB
,
26519 IX86_BUILTIN_VPCOMFALSEUB
,
26520 IX86_BUILTIN_VPCOMTRUEUB
,
26522 IX86_BUILTIN_VPCOMEQUW
,
26523 IX86_BUILTIN_VPCOMNEUW
,
26524 IX86_BUILTIN_VPCOMLTUW
,
26525 IX86_BUILTIN_VPCOMLEUW
,
26526 IX86_BUILTIN_VPCOMGTUW
,
26527 IX86_BUILTIN_VPCOMGEUW
,
26528 IX86_BUILTIN_VPCOMFALSEUW
,
26529 IX86_BUILTIN_VPCOMTRUEUW
,
26531 IX86_BUILTIN_VPCOMEQUD
,
26532 IX86_BUILTIN_VPCOMNEUD
,
26533 IX86_BUILTIN_VPCOMLTUD
,
26534 IX86_BUILTIN_VPCOMLEUD
,
26535 IX86_BUILTIN_VPCOMGTUD
,
26536 IX86_BUILTIN_VPCOMGEUD
,
26537 IX86_BUILTIN_VPCOMFALSEUD
,
26538 IX86_BUILTIN_VPCOMTRUEUD
,
26540 IX86_BUILTIN_VPCOMEQUQ
,
26541 IX86_BUILTIN_VPCOMNEUQ
,
26542 IX86_BUILTIN_VPCOMLTUQ
,
26543 IX86_BUILTIN_VPCOMLEUQ
,
26544 IX86_BUILTIN_VPCOMGTUQ
,
26545 IX86_BUILTIN_VPCOMGEUQ
,
26546 IX86_BUILTIN_VPCOMFALSEUQ
,
26547 IX86_BUILTIN_VPCOMTRUEUQ
,
26549 IX86_BUILTIN_VPCOMEQB
,
26550 IX86_BUILTIN_VPCOMNEB
,
26551 IX86_BUILTIN_VPCOMLTB
,
26552 IX86_BUILTIN_VPCOMLEB
,
26553 IX86_BUILTIN_VPCOMGTB
,
26554 IX86_BUILTIN_VPCOMGEB
,
26555 IX86_BUILTIN_VPCOMFALSEB
,
26556 IX86_BUILTIN_VPCOMTRUEB
,
26558 IX86_BUILTIN_VPCOMEQW
,
26559 IX86_BUILTIN_VPCOMNEW
,
26560 IX86_BUILTIN_VPCOMLTW
,
26561 IX86_BUILTIN_VPCOMLEW
,
26562 IX86_BUILTIN_VPCOMGTW
,
26563 IX86_BUILTIN_VPCOMGEW
,
26564 IX86_BUILTIN_VPCOMFALSEW
,
26565 IX86_BUILTIN_VPCOMTRUEW
,
26567 IX86_BUILTIN_VPCOMEQD
,
26568 IX86_BUILTIN_VPCOMNED
,
26569 IX86_BUILTIN_VPCOMLTD
,
26570 IX86_BUILTIN_VPCOMLED
,
26571 IX86_BUILTIN_VPCOMGTD
,
26572 IX86_BUILTIN_VPCOMGED
,
26573 IX86_BUILTIN_VPCOMFALSED
,
26574 IX86_BUILTIN_VPCOMTRUED
,
26576 IX86_BUILTIN_VPCOMEQQ
,
26577 IX86_BUILTIN_VPCOMNEQ
,
26578 IX86_BUILTIN_VPCOMLTQ
,
26579 IX86_BUILTIN_VPCOMLEQ
,
26580 IX86_BUILTIN_VPCOMGTQ
,
26581 IX86_BUILTIN_VPCOMGEQ
,
26582 IX86_BUILTIN_VPCOMFALSEQ
,
26583 IX86_BUILTIN_VPCOMTRUEQ
,
26585 /* LWP instructions. */
26586 IX86_BUILTIN_LLWPCB
,
26587 IX86_BUILTIN_SLWPCB
,
26588 IX86_BUILTIN_LWPVAL32
,
26589 IX86_BUILTIN_LWPVAL64
,
26590 IX86_BUILTIN_LWPINS32
,
26591 IX86_BUILTIN_LWPINS64
,
26596 IX86_BUILTIN_XBEGIN
,
26598 IX86_BUILTIN_XABORT
,
26599 IX86_BUILTIN_XTEST
,
26601 /* BMI instructions. */
26602 IX86_BUILTIN_BEXTR32
,
26603 IX86_BUILTIN_BEXTR64
,
26606 /* TBM instructions. */
26607 IX86_BUILTIN_BEXTRI32
,
26608 IX86_BUILTIN_BEXTRI64
,
26610 /* BMI2 instructions. */
26611 IX86_BUILTIN_BZHI32
,
26612 IX86_BUILTIN_BZHI64
,
26613 IX86_BUILTIN_PDEP32
,
26614 IX86_BUILTIN_PDEP64
,
26615 IX86_BUILTIN_PEXT32
,
26616 IX86_BUILTIN_PEXT64
,
26618 /* ADX instructions. */
26619 IX86_BUILTIN_ADDCARRYX32
,
26620 IX86_BUILTIN_ADDCARRYX64
,
26622 /* FSGSBASE instructions. */
26623 IX86_BUILTIN_RDFSBASE32
,
26624 IX86_BUILTIN_RDFSBASE64
,
26625 IX86_BUILTIN_RDGSBASE32
,
26626 IX86_BUILTIN_RDGSBASE64
,
26627 IX86_BUILTIN_WRFSBASE32
,
26628 IX86_BUILTIN_WRFSBASE64
,
26629 IX86_BUILTIN_WRGSBASE32
,
26630 IX86_BUILTIN_WRGSBASE64
,
26632 /* RDRND instructions. */
26633 IX86_BUILTIN_RDRAND16_STEP
,
26634 IX86_BUILTIN_RDRAND32_STEP
,
26635 IX86_BUILTIN_RDRAND64_STEP
,
26637 /* RDSEED instructions. */
26638 IX86_BUILTIN_RDSEED16_STEP
,
26639 IX86_BUILTIN_RDSEED32_STEP
,
26640 IX86_BUILTIN_RDSEED64_STEP
,
26642 /* F16C instructions. */
26643 IX86_BUILTIN_CVTPH2PS
,
26644 IX86_BUILTIN_CVTPH2PS256
,
26645 IX86_BUILTIN_CVTPS2PH
,
26646 IX86_BUILTIN_CVTPS2PH256
,
26648 /* CFString built-in for darwin */
26649 IX86_BUILTIN_CFSTRING
,
26651 /* Builtins to get CPU type and supported features. */
26652 IX86_BUILTIN_CPU_INIT
,
26653 IX86_BUILTIN_CPU_IS
,
26654 IX86_BUILTIN_CPU_SUPPORTS
,
/* Table for the ix86 builtin decls.  */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];

/* Table of all of the builtin functions that are possible with different ISA's
   but are waiting to be built until a function is declared to use that
   ISA.  */
struct builtin_isa {
  const char *name;		/* function name */
  enum ix86_builtin_func_type tcode; /* type to use in the declaration */
  HOST_WIDE_INT isa;		/* isa_flags this builtin is defined for */
  bool const_p;			/* true if the declaration is constant */
  bool set_and_not_built_p;
};

static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];

/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the MASK
   of which isa_flags to use in the ix86_builtins_isa array.  Stores the
   function decl in the ix86_builtins array.  Returns the function decl or
   NULL_TREE, if the builtin was not added.

   If the front end has a special hook for builtin functions, delay adding
   builtin functions that aren't in the current ISA until the ISA is changed
   with function specific optimization.  Doing so can save about 300K for the
   default compiler.  When the builtin is expanded, check at that time whether
   it is valid.

   If the front end doesn't have a special hook, record all builtins, even if
   it isn't an instruction set in the current ISA in case the user uses
   function specific options for a different ISA, so that we don't get scope
   errors if a builtin is added in the middle of a function scope.  */

static inline tree
def_builtin (HOST_WIDE_INT mask, const char *name,
	     enum ix86_builtin_func_type tcode,
	     enum ix86_builtins code)
{
  tree decl = NULL_TREE;

  if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
    {
      ix86_builtins_isa[(int) code].isa = mask;

      mask &= ~OPTION_MASK_ISA_64BIT;
      if (mask == 0
	  || (mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type = ix86_get_builtin_func_type (tcode);
	  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
				       NULL, NULL_TREE);
	  ix86_builtins[(int) code] = decl;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = false;
	}
      else
	{
	  ix86_builtins[(int) code] = NULL_TREE;
	  ix86_builtins_isa[(int) code].tcode = tcode;
	  ix86_builtins_isa[(int) code].name = name;
	  ix86_builtins_isa[(int) code].const_p = false;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = true;
	}
    }

  return decl;
}
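
/* A sketch of what the deferral above buys (illustrative only; this call
   is not part of GCC).  With a front end that supplies
   builtin_function_ext_scope, a call such as the hypothetical one below
   merely records the builtin in ix86_builtins_isa whenever
   OPTION_MASK_ISA_SSE4_2 is absent from ix86_isa_flags; the decl is only
   built later, by ix86_add_new_builtins, if a target attribute turns
   SSE4.2 on.  "__builtin_ia32_example" and IX86_BUILTIN_EXAMPLE are
   invented names used purely for illustration.  */
#if 0
  def_builtin (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_example",
	       INT_FTYPE_VOID, IX86_BUILTIN_EXAMPLE);
#endif
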
/* Like def_builtin, but also marks the function decl "const".  */

static inline tree
def_builtin_const (HOST_WIDE_INT mask, const char *name,
		   enum ix86_builtin_func_type tcode, enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, tcode, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  else
    ix86_builtins_isa[(int) code].const_p = true;

  return decl;
}

/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even if we didn't use them.  */

static void
ix86_add_new_builtins (HOST_WIDE_INT isa)
{
  int i;

  for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
    {
      if ((ix86_builtins_isa[i].isa & isa) != 0
	  && ix86_builtins_isa[i].set_and_not_built_p)
	{
	  tree decl, type;

	  /* Don't define the builtin again.  */
	  ix86_builtins_isa[i].set_and_not_built_p = false;

	  type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
	  decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
						 type, i, BUILT_IN_MD, NULL,
						 NULL_TREE);

	  ix86_builtins[i] = decl;
	  if (ix86_builtins_isa[i].const_p)
	    TREE_READONLY (decl) = 1;
	}
    }
}
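
/* A sketch of the intended call site (an assumption about the option and
   attribute processing elsewhere in this file, shown for orientation
   only): once ix86_isa_flags has been extended with newly enabled ISA
   bits, the deferred builtins are materialized in one pass.  */
#if 0
  /* After ix86_isa_flags has grown new ISA bits, e.g. from a
     target("...") attribute:  */
  ix86_add_new_builtins (ix86_isa_flags);
#endif
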
/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS	1

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const int flag;
};

static const struct builtin_description bdesc_comi[] =
{
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
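
/* An approximate sketch of how descriptor tables like bdesc_comi are
   consumed (not a verbatim copy of the loop GCC runs at initialization):
   each row's mask, name and code are handed to def_builtin_const once,
   and the comparison field is consulted again when the builtin is
   expanded into RTL.  */
#if 0
  const struct builtin_description *d;
  size_t i;

  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      /* SSE2 rows compare V2DF operands, SSE rows V4SF operands.  */
      enum ix86_builtin_func_type ftype
	= (d->mask == OPTION_MASK_ISA_SSE2
	   ? INT_FTYPE_V2DF_V2DF : INT_FTYPE_V4SF_V4SF);
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
#endif
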

static const struct builtin_description bdesc_pcmpestr[] =
{
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
};

static const struct builtin_description bdesc_pcmpistr[] =
{
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
};

/* Special builtins with variable number of arguments.  */
static const struct builtin_description bdesc_special_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_pause, "__builtin_ia32_pause", IX86_BUILTIN_PAUSE, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* FXSR, XSAVE and XSAVEOPT */
  { OPTION_MASK_ISA_FXSR, CODE_FOR_nothing, "__builtin_ia32_fxsave", IX86_BUILTIN_FXSAVE, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_FXSR, CODE_FOR_nothing, "__builtin_ia32_fxrstor", IX86_BUILTIN_FXRSTOR, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_XSAVE, CODE_FOR_nothing, "__builtin_ia32_xsave", IX86_BUILTIN_XSAVE, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVE, CODE_FOR_nothing, "__builtin_ia32_xrstor", IX86_BUILTIN_XRSTOR, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVEOPT, CODE_FOR_nothing, "__builtin_ia32_xsaveopt", IX86_BUILTIN_XSAVEOPT, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },

  { OPTION_MASK_ISA_FXSR | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_fxsave64", IX86_BUILTIN_FXSAVE64, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_FXSR | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_fxrstor64", IX86_BUILTIN_FXRSTOR64, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_XSAVE | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_xsave64", IX86_BUILTIN_XSAVE64, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVE | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_xrstor64", IX86_BUILTIN_XRSTOR64, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVEOPT | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_xsaveopt64", IX86_BUILTIN_XSAVEOPT64, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storeups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },

  /* SSE or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntq, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_storeupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_storedqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntisi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_movntidi, "__builtin_ia32_movnti64", IX86_BUILTIN_MOVNTI64, UNKNOWN, (int) VOID_FTYPE_PLONGLONG_LONGLONG },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loaddqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },

  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_loadupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_loadups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_storeupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_storeups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_loaddqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_storedqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },

  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_movntdqa, "__builtin_ia32_movntdqa256", IX86_BUILTIN_MOVNTDQA256, UNKNOWN, (int) V4DI_FTYPE_PV4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadd, "__builtin_ia32_maskloadd", IX86_BUILTIN_MASKLOADD, UNKNOWN, (int) V4SI_FTYPE_PCV4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadq, "__builtin_ia32_maskloadq", IX86_BUILTIN_MASKLOADQ, UNKNOWN, (int) V2DI_FTYPE_PCV2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadd256, "__builtin_ia32_maskloadd256", IX86_BUILTIN_MASKLOADD256, UNKNOWN, (int) V8SI_FTYPE_PCV8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadq256, "__builtin_ia32_maskloadq256", IX86_BUILTIN_MASKLOADQ256, UNKNOWN, (int) V4DI_FTYPE_PCV4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstored, "__builtin_ia32_maskstored", IX86_BUILTIN_MASKSTORED, UNKNOWN, (int) VOID_FTYPE_PV4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstoreq, "__builtin_ia32_maskstoreq", IX86_BUILTIN_MASKSTOREQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstored256, "__builtin_ia32_maskstored256", IX86_BUILTIN_MASKSTORED256, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstoreq256, "__builtin_ia32_maskstoreq256", IX86_BUILTIN_MASKSTOREQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI_V4DI },

  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },

  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },

  { OPTION_MASK_ISA_RTM, CODE_FOR_xbegin, "__builtin_ia32_xbegin", IX86_BUILTIN_XBEGIN, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_RTM, CODE_FOR_xend, "__builtin_ia32_xend", IX86_BUILTIN_XEND, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_RTM, CODE_FOR_xtest, "__builtin_ia32_xtest", IX86_BUILTIN_XTEST, UNKNOWN, (int) INT_FTYPE_VOID },
};
26973 /* Builtins with variable number of arguments. */
26974 static const struct builtin_description bdesc_args
[] =
26976 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_bsr
, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI
, UNKNOWN
, (int) INT_FTYPE_INT
},
26977 { OPTION_MASK_ISA_64BIT
, CODE_FOR_bsr_rex64
, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI
, UNKNOWN
, (int) INT64_FTYPE_INT64
},
26978 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_nothing
, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC
, UNKNOWN
, (int) UINT64_FTYPE_INT
},
26979 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotlqi3
, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI
, UNKNOWN
, (int) UINT8_FTYPE_UINT8_INT
},
26980 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotlhi3
, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI
, UNKNOWN
, (int) UINT16_FTYPE_UINT16_INT
},
26981 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotrqi3
, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI
, UNKNOWN
, (int) UINT8_FTYPE_UINT8_INT
},
26982 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotrhi3
, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI
, UNKNOWN
, (int) UINT16_FTYPE_UINT16_INT
},
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
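  /* Shift builtins: the ...i forms take the count as an integer (the
     _SI_COUNT function types), the others take it in a vector
     register (the _V4HI_COUNT etc. types).  */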
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
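  /* 3DNow! */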
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
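  /* 3DNow!A */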
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
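  /* SSE */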
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
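  /* SSE comparisons.  GT and GE are handled as LT and LE with the
     operands interchanged, which the _SWAP function types encode:
     e.g. __builtin_ia32_cmpgtps (a, b) is emitted as cmpltps with
     a and b swapped.  */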
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
  /* SSE MMX or 3Dnow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
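  /* SSE2 */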
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_floatv4siv4sf2, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_fix_truncv4sfv4si2, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_widen_umult_even_v4si, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
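  /* SSE2 MMX */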
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
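  /* SSE3 */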
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
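  /* SSSE3 */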
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
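  /* SSE4.1 */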
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
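  /* SSE4.1 rounding.  The floor/ceil/trunc/rint variants all reuse
     the roundpd/roundps patterns; the rounding mode (ROUND_FLOOR,
     ROUND_CEIL, ROUND_TRUNC, ROUND_MXCSR) rides in the comparison
     field, cast to enum rtx_code.  */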
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_floorpd", IX86_BUILTIN_FLOORPD, (enum rtx_code) ROUND_FLOOR, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_ceilpd", IX86_BUILTIN_CEILPD, (enum rtx_code) ROUND_CEIL, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_truncpd", IX86_BUILTIN_TRUNCPD, (enum rtx_code) ROUND_TRUNC, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_rintpd", IX86_BUILTIN_RINTPD, (enum rtx_code) ROUND_MXCSR, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd_vec_pack_sfix, "__builtin_ia32_floorpd_vec_pack_sfix", IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX, (enum rtx_code) ROUND_FLOOR, (int) V4SI_FTYPE_V2DF_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd_vec_pack_sfix, "__builtin_ia32_ceilpd_vec_pack_sfix", IX86_BUILTIN_CEILPD_VEC_PACK_SFIX, (enum rtx_code) ROUND_CEIL, (int) V4SI_FTYPE_V2DF_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv2df2, "__builtin_ia32_roundpd_az", IX86_BUILTIN_ROUNDPD_AZ, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv2df2_vec_pack_sfix, "__builtin_ia32_roundpd_az_vec_pack_sfix", IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_floorps", IX86_BUILTIN_FLOORPS, (enum rtx_code) ROUND_FLOOR, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_ceilps", IX86_BUILTIN_CEILPS, (enum rtx_code) ROUND_CEIL, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_truncps", IX86_BUILTIN_TRUNCPS, (enum rtx_code) ROUND_TRUNC, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_rintps", IX86_BUILTIN_RINTPS, (enum rtx_code) ROUND_MXCSR, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps_sfix, "__builtin_ia32_floorps_sfix", IX86_BUILTIN_FLOORPS_SFIX, (enum rtx_code) ROUND_FLOOR, (int) V4SI_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps_sfix, "__builtin_ia32_ceilps_sfix", IX86_BUILTIN_CEILPS_SFIX, (enum rtx_code) ROUND_CEIL, (int) V4SI_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv4sf2, "__builtin_ia32_roundps_az", IX86_BUILTIN_ROUNDPS_AZ, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv4sf2_sfix, "__builtin_ia32_roundps_az_sfix", IX86_BUILTIN_ROUNDPS_AZ_SFIX, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
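  /* SSE4.2 */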
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
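  /* SSE4A */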
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
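  /* AES */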
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
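
  /* PCLMUL */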
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
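
  /* AVX */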
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_floatv4siv4df2, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_floatv8siv8sf2, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_fix_truncv4dfv4si2, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_fix_truncv8sfv8si2, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_floorpd256", IX86_BUILTIN_FLOORPD256, (enum rtx_code) ROUND_FLOOR, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_ceilpd256", IX86_BUILTIN_CEILPD256, (enum rtx_code) ROUND_CEIL, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_truncpd256", IX86_BUILTIN_TRUNCPD256, (enum rtx_code) ROUND_TRUNC, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_rintpd256", IX86_BUILTIN_RINTPD256, (enum rtx_code) ROUND_MXCSR, (int) V4DF_FTYPE_V4DF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv4df2, "__builtin_ia32_roundpd_az256", IX86_BUILTIN_ROUNDPD_AZ256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv4df2_vec_pack_sfix, "__builtin_ia32_roundpd_az_vec_pack_sfix256", IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256, UNKNOWN, (int) V8SI_FTYPE_V4DF_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd_vec_pack_sfix256, "__builtin_ia32_floorpd_vec_pack_sfix256", IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256, (enum rtx_code) ROUND_FLOOR, (int) V8SI_FTYPE_V4DF_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd_vec_pack_sfix256, "__builtin_ia32_ceilpd_vec_pack_sfix256", IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256, (enum rtx_code) ROUND_CEIL, (int) V8SI_FTYPE_V4DF_V4DF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_floorps256", IX86_BUILTIN_FLOORPS256, (enum rtx_code) ROUND_FLOOR, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_ceilps256", IX86_BUILTIN_CEILPS256, (enum rtx_code) ROUND_CEIL, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_truncps256", IX86_BUILTIN_TRUNCPS256, (enum rtx_code) ROUND_TRUNC, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_rintps256", IX86_BUILTIN_RINTPS256, (enum rtx_code) ROUND_MXCSR, (int) V8SF_FTYPE_V8SF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps_sfix256, "__builtin_ia32_floorps_sfix256", IX86_BUILTIN_FLOORPS_SFIX256, (enum rtx_code) ROUND_FLOOR, (int) V8SI_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps_sfix256, "__builtin_ia32_ceilps_sfix256", IX86_BUILTIN_CEILPS_SFIX256, (enum rtx_code) ROUND_CEIL, (int) V8SI_FTYPE_V8SF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv8sf2, "__builtin_ia32_roundps_az256", IX86_BUILTIN_ROUNDPS_AZ256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv8sf2_sfix, "__builtin_ia32_roundps_az_sfix256", IX86_BUILTIN_ROUNDPS_AZ_SFIX256, UNKNOWN, (int) V8SI_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_pack_sfix_v4df, "__builtin_ia32_vec_pack_sfix256 ", IX86_BUILTIN_VEC_PACK_SFIX256, UNKNOWN, (int) V8SI_FTYPE_V4DF_V4DF },
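
  /* AVX2 */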
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_mpsadbw, "__builtin_ia32_mpsadbw256", IX86_BUILTIN_MPSADBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv32qi2, "__builtin_ia32_pabsb256", IX86_BUILTIN_PABSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv16hi2, "__builtin_ia32_pabsw256", IX86_BUILTIN_PABSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv8si2, "__builtin_ia32_pabsd256", IX86_BUILTIN_PABSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packssdw, "__builtin_ia32_packssdw256", IX86_BUILTIN_PACKSSDW256, UNKNOWN, (int) V16HI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packsswb, "__builtin_ia32_packsswb256", IX86_BUILTIN_PACKSSWB256, UNKNOWN, (int) V32QI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packusdw, "__builtin_ia32_packusdw256", IX86_BUILTIN_PACKUSDW256, UNKNOWN, (int) V16HI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packuswb, "__builtin_ia32_packuswb256", IX86_BUILTIN_PACKUSWB256, UNKNOWN, (int) V32QI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv32qi3, "__builtin_ia32_paddb256", IX86_BUILTIN_PADDB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv16hi3, "__builtin_ia32_paddw256", IX86_BUILTIN_PADDW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv8si3, "__builtin_ia32_paddd256", IX86_BUILTIN_PADDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv4di3, "__builtin_ia32_paddq256", IX86_BUILTIN_PADDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ssaddv32qi3, "__builtin_ia32_paddsb256", IX86_BUILTIN_PADDSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ssaddv16hi3, "__builtin_ia32_paddsw256", IX86_BUILTIN_PADDSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_usaddv32qi3, "__builtin_ia32_paddusb256", IX86_BUILTIN_PADDUSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_usaddv16hi3, "__builtin_ia32_paddusw256", IX86_BUILTIN_PADDUSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_palignrv2ti, "__builtin_ia32_palignr256", IX86_BUILTIN_PALIGNR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_andv4di3, "__builtin_ia32_andsi256", IX86_BUILTIN_AND256I, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_andnotv4di3, "__builtin_ia32_andnotsi256", IX86_BUILTIN_ANDNOT256I, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_uavgv32qi3, "__builtin_ia32_pavgb256", IX86_BUILTIN_PAVGB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_uavgv16hi3, "__builtin_ia32_pavgw256", IX86_BUILTIN_PAVGW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblendvb, "__builtin_ia32_pblendvb256", IX86_BUILTIN_PBLENDVB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblendw, "__builtin_ia32_pblendw256", IX86_BUILTIN_PBLENDVW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv32qi3, "__builtin_ia32_pcmpeqb256", IX86_BUILTIN_PCMPEQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv16hi3, "__builtin_ia32_pcmpeqw256", IX86_BUILTIN_PCMPEQW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv8si3, "__builtin_ia32_pcmpeqd256", IX86_BUILTIN_PCMPEQD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv4di3, "__builtin_ia32_pcmpeqq256", IX86_BUILTIN_PCMPEQQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv32qi3, "__builtin_ia32_pcmpgtb256", IX86_BUILTIN_PCMPGTB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv16hi3, "__builtin_ia32_pcmpgtw256", IX86_BUILTIN_PCMPGTW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv8si3, "__builtin_ia32_pcmpgtd256", IX86_BUILTIN_PCMPGTD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv4di3, "__builtin_ia32_pcmpgtq256", IX86_BUILTIN_PCMPGTQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phaddwv16hi3, "__builtin_ia32_phaddw256", IX86_BUILTIN_PHADDW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phadddv8si3, "__builtin_ia32_phaddd256", IX86_BUILTIN_PHADDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phaddswv16hi3, "__builtin_ia32_phaddsw256", IX86_BUILTIN_PHADDSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubwv16hi3, "__builtin_ia32_phsubw256", IX86_BUILTIN_PHSUBW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubdv8si3, "__builtin_ia32_phsubd256", IX86_BUILTIN_PHSUBD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubswv16hi3, "__builtin_ia32_phsubsw256", IX86_BUILTIN_PHSUBSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmaddubsw256, "__builtin_ia32_pmaddubsw256", IX86_BUILTIN_PMADDUBSW256, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmaddwd, "__builtin_ia32_pmaddwd256", IX86_BUILTIN_PMADDWD256, UNKNOWN, (int) V8SI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv32qi3, "__builtin_ia32_pmaxsb256", IX86_BUILTIN_PMAXSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv16hi3, "__builtin_ia32_pmaxsw256", IX86_BUILTIN_PMAXSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv8si3, "__builtin_ia32_pmaxsd256", IX86_BUILTIN_PMAXSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv32qi3, "__builtin_ia32_pmaxub256", IX86_BUILTIN_PMAXUB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv16hi3, "__builtin_ia32_pmaxuw256", IX86_BUILTIN_PMAXUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv8si3, "__builtin_ia32_pmaxud256", IX86_BUILTIN_PMAXUD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv32qi3, "__builtin_ia32_pminsb256", IX86_BUILTIN_PMINSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv16hi3, "__builtin_ia32_pminsw256", IX86_BUILTIN_PMINSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv8si3, "__builtin_ia32_pminsd256", IX86_BUILTIN_PMINSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv32qi3, "__builtin_ia32_pminub256", IX86_BUILTIN_PMINUB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv16hi3, "__builtin_ia32_pminuw256", IX86_BUILTIN_PMINUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv8si3, "__builtin_ia32_pminud256", IX86_BUILTIN_PMINUD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmovmskb, "__builtin_ia32_pmovmskb256", IX86_BUILTIN_PMOVMSKB256, UNKNOWN, (int) INT_FTYPE_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv16qiv16hi2, "__builtin_ia32_pmovsxbw256", IX86_BUILTIN_PMOVSXBW256, UNKNOWN, (int) V16HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv8qiv8si2, "__builtin_ia32_pmovsxbd256", IX86_BUILTIN_PMOVSXBD256, UNKNOWN, (int) V8SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4qiv4di2, "__builtin_ia32_pmovsxbq256", IX86_BUILTIN_PMOVSXBQ256, UNKNOWN, (int) V4DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv8hiv8si2, "__builtin_ia32_pmovsxwd256", IX86_BUILTIN_PMOVSXWD256, UNKNOWN, (int) V8SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4hiv4di2, "__builtin_ia32_pmovsxwq256", IX86_BUILTIN_PMOVSXWQ256, UNKNOWN, (int) V4DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4siv4di2, "__builtin_ia32_pmovsxdq256", IX86_BUILTIN_PMOVSXDQ256, UNKNOWN, (int) V4DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv16qiv16hi2, "__builtin_ia32_pmovzxbw256", IX86_BUILTIN_PMOVZXBW256, UNKNOWN, (int) V16HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv8qiv8si2, "__builtin_ia32_pmovzxbd256", IX86_BUILTIN_PMOVZXBD256, UNKNOWN, (int) V8SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4qiv4di2, "__builtin_ia32_pmovzxbq256", IX86_BUILTIN_PMOVZXBQ256, UNKNOWN, (int) V4DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv8hiv8si2, "__builtin_ia32_pmovzxwd256", IX86_BUILTIN_PMOVZXWD256, UNKNOWN, (int) V8SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4hiv4di2, "__builtin_ia32_pmovzxwq256", IX86_BUILTIN_PMOVZXWQ256, UNKNOWN, (int) V4DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4siv4di2, "__builtin_ia32_pmovzxdq256", IX86_BUILTIN_PMOVZXDQ256, UNKNOWN, (int) V4DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_vec_widen_smult_even_v8si, "__builtin_ia32_pmuldq256", IX86_BUILTIN_PMULDQ256, UNKNOWN, (int) V4DI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmulhrswv16hi3, "__builtin_ia32_pmulhrsw256", IX86_BUILTIN_PMULHRSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umulv16hi3_highpart, "__builtin_ia32_pmulhuw256", IX86_BUILTIN_PMULHUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smulv16hi3_highpart, "__builtin_ia32_pmulhw256", IX86_BUILTIN_PMULHW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_mulv16hi3, "__builtin_ia32_pmullw256", IX86_BUILTIN_PMULLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_mulv8si3, "__builtin_ia32_pmulld256", IX86_BUILTIN_PMULLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_vec_widen_umult_even_v8si, "__builtin_ia32_pmuludq256", IX86_BUILTIN_PMULUDQ256, UNKNOWN, (int) V4DI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_iorv4di3, "__builtin_ia32_por256", IX86_BUILTIN_POR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psadbw, "__builtin_ia32_psadbw256", IX86_BUILTIN_PSADBW256, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufbv32qi3, "__builtin_ia32_pshufb256", IX86_BUILTIN_PSHUFB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufdv3, "__builtin_ia32_pshufd256", IX86_BUILTIN_PSHUFD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufhwv3, "__builtin_ia32_pshufhw256", IX86_BUILTIN_PSHUFHW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshuflwv3, "__builtin_ia32_pshuflw256", IX86_BUILTIN_PSHUFLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv32qi3, "__builtin_ia32_psignb256", IX86_BUILTIN_PSIGNB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv16hi3, "__builtin_ia32_psignw256", IX86_BUILTIN_PSIGNW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv8si3, "__builtin_ia32_psignd256", IX86_BUILTIN_PSIGND256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlv2ti3, "__builtin_ia32_pslldqi256", IX86_BUILTIN_PSLLDQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv16hi3, "__builtin_ia32_psllwi256", IX86_BUILTIN_PSLLWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv16hi3, "__builtin_ia32_psllw256", IX86_BUILTIN_PSLLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv8si3, "__builtin_ia32_pslldi256", IX86_BUILTIN_PSLLDI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv8si3, "__builtin_ia32_pslld256", IX86_BUILTIN_PSLLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv4di3, "__builtin_ia32_psllqi256", IX86_BUILTIN_PSLLQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv4di3, "__builtin_ia32_psllq256", IX86_BUILTIN_PSLLQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv16hi3, "__builtin_ia32_psrawi256", IX86_BUILTIN_PSRAWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv16hi3, "__builtin_ia32_psraw256", IX86_BUILTIN_PSRAW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv8si3, "__builtin_ia32_psradi256", IX86_BUILTIN_PSRADI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv8si3, "__builtin_ia32_psrad256", IX86_BUILTIN_PSRAD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrv2ti3, "__builtin_ia32_psrldqi256", IX86_BUILTIN_PSRLDQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv16hi3, "__builtin_ia32_psrlwi256", IX86_BUILTIN_PSRLWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv16hi3, "__builtin_ia32_psrlw256", IX86_BUILTIN_PSRLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv8si3, "__builtin_ia32_psrldi256", IX86_BUILTIN_PSRLDI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv8si3, "__builtin_ia32_psrld256", IX86_BUILTIN_PSRLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv4di3, "__builtin_ia32_psrlqi256", IX86_BUILTIN_PSRLQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv4di3, "__builtin_ia32_psrlq256", IX86_BUILTIN_PSRLQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv32qi3, "__builtin_ia32_psubb256", IX86_BUILTIN_PSUBB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv16hi3, "__builtin_ia32_psubw256", IX86_BUILTIN_PSUBW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv8si3, "__builtin_ia32_psubd256", IX86_BUILTIN_PSUBD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv4di3, "__builtin_ia32_psubq256", IX86_BUILTIN_PSUBQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sssubv32qi3, "__builtin_ia32_psubsb256", IX86_BUILTIN_PSUBSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sssubv16hi3, "__builtin_ia32_psubsw256", IX86_BUILTIN_PSUBSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ussubv32qi3, "__builtin_ia32_psubusb256", IX86_BUILTIN_PSUBUSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ussubv16hi3, "__builtin_ia32_psubusw256", IX86_BUILTIN_PSUBUSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv32qi, "__builtin_ia32_punpckhbw256", IX86_BUILTIN_PUNPCKHBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv16hi, "__builtin_ia32_punpckhwd256", IX86_BUILTIN_PUNPCKHWD256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv8si, "__builtin_ia32_punpckhdq256", IX86_BUILTIN_PUNPCKHDQ256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv4di, "__builtin_ia32_punpckhqdq256", IX86_BUILTIN_PUNPCKHQDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv32qi, "__builtin_ia32_punpcklbw256", IX86_BUILTIN_PUNPCKLBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv16hi, "__builtin_ia32_punpcklwd256", IX86_BUILTIN_PUNPCKLWD256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv8si, "__builtin_ia32_punpckldq256", IX86_BUILTIN_PUNPCKLDQ256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv4di, "__builtin_ia32_punpcklqdq256", IX86_BUILTIN_PUNPCKLQDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_xorv4di3, "__builtin_ia32_pxor256", IX86_BUILTIN_PXOR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv4sf, "__builtin_ia32_vbroadcastss_ps", IX86_BUILTIN_VBROADCASTSS_PS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv8sf, "__builtin_ia32_vbroadcastss_ps256", IX86_BUILTIN_VBROADCASTSS_PS256, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv4df, "__builtin_ia32_vbroadcastsd_pd256", IX86_BUILTIN_VBROADCASTSD_PD256, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vbroadcasti128_v4di, "__builtin_ia32_vbroadcastsi256", IX86_BUILTIN_VBROADCASTSI256, UNKNOWN, (int) V4DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblenddv4si, "__builtin_ia32_pblendd128", IX86_BUILTIN_PBLENDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblenddv8si, "__builtin_ia32_pblendd256", IX86_BUILTIN_PBLENDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv32qi, "__builtin_ia32_pbroadcastb256", IX86_BUILTIN_PBROADCASTB256, UNKNOWN, (int) V32QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv16hi, "__builtin_ia32_pbroadcastw256", IX86_BUILTIN_PBROADCASTW256, UNKNOWN, (int) V16HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv8si, "__builtin_ia32_pbroadcastd256", IX86_BUILTIN_PBROADCASTD256, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv4di, "__builtin_ia32_pbroadcastq256", IX86_BUILTIN_PBROADCASTQ256, UNKNOWN, (int) V4DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv16qi, "__builtin_ia32_pbroadcastb128", IX86_BUILTIN_PBROADCASTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv8hi, "__builtin_ia32_pbroadcastw128", IX86_BUILTIN_PBROADCASTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv4si, "__builtin_ia32_pbroadcastd128", IX86_BUILTIN_PBROADCASTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv2di, "__builtin_ia32_pbroadcastq128", IX86_BUILTIN_PBROADCASTQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permvarv8si, "__builtin_ia32_permvarsi256", IX86_BUILTIN_VPERMVARSI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permvarv8sf, "__builtin_ia32_permvarsf256", IX86_BUILTIN_VPERMVARSF256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv4df, "__builtin_ia32_permdf256", IX86_BUILTIN_VPERMDF256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv4di, "__builtin_ia32_permdi256", IX86_BUILTIN_VPERMDI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv2ti, "__builtin_ia32_permti256", IX86_BUILTIN_VPERMTI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_extracti128, "__builtin_ia32_extract128i256", IX86_BUILTIN_VEXTRACT128I256, UNKNOWN, (int) V2DI_FTYPE_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_inserti128, "__builtin_ia32_insert128i256", IX86_BUILTIN_VINSERT128I256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4di, "__builtin_ia32_psllv4di", IX86_BUILTIN_PSLLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv2di, "__builtin_ia32_psllv2di", IX86_BUILTIN_PSLLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv8si, "__builtin_ia32_psllv8si", IX86_BUILTIN_PSLLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4si, "__builtin_ia32_psllv4si", IX86_BUILTIN_PSLLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv8si, "__builtin_ia32_psrav8si", IX86_BUILTIN_PSRAVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv4si, "__builtin_ia32_psrav4si", IX86_BUILTIN_PSRAVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4di, "__builtin_ia32_psrlv4di", IX86_BUILTIN_PSRLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv2di, "__builtin_ia32_psrlv2di", IX86_BUILTIN_PSRLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv8si, "__builtin_ia32_psrlv8si", IX86_BUILTIN_PSRLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4si, "__builtin_ia32_psrlv4si", IX86_BUILTIN_PSRLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
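
  /* LZCNT */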
  { OPTION_MASK_ISA_LZCNT, CODE_FOR_clzhi2_lzcnt, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
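
  /* BMI */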
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
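
  /* TBM */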
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
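
  /* F16C */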
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
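
  /* BMI2 */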
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_bzhi_si3, "__builtin_ia32_bzhi_si", IX86_BUILTIN_BZHI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_bzhi_di3, "__builtin_ia32_bzhi_di", IX86_BUILTIN_BZHI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pdep_si3, "__builtin_ia32_pdep_si", IX86_BUILTIN_PDEP32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pdep_di3, "__builtin_ia32_pdep_di", IX86_BUILTIN_PDEP64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pext_si3, "__builtin_ia32_pext_si", IX86_BUILTIN_PEXT32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pext_di3, "__builtin_ia32_pext_di", IX86_BUILTIN_PEXT64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
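
/* Descriptor table for the FMA4, FMA and XOP builtins; the
   MULTI_ARG_<nargs>_<mode> macros above abbreviate the function type
   used by each entry.  */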
static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA, CODE_FOR_fmai_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss3", IX86_BUILTIN_VFMADDSS3,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA, CODE_FOR_fmai_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd3", IX86_BUILTIN_VFMADDSD3,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
    "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
    "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
    "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
    "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
    "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
    "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
    "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
    "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP
},
27991 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv4si3
, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD
, LT
, (int)MULTI_ARG_2_SI_CMP
},
27992 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv4si3
, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED
, LE
, (int)MULTI_ARG_2_SI_CMP
},
27993 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv4si3
, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD
, GT
, (int)MULTI_ARG_2_SI_CMP
},
27994 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv4si3
, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED
, GE
, (int)MULTI_ARG_2_SI_CMP
},
27996 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ
, EQ
, (int)MULTI_ARG_2_DI_CMP
},
27997 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
27998 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
27999 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ
, LT
, (int)MULTI_ARG_2_DI_CMP
},
28000 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ
, LE
, (int)MULTI_ARG_2_DI_CMP
},
28001 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ
, GT
, (int)MULTI_ARG_2_DI_CMP
},
28002 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ
, GE
, (int)MULTI_ARG_2_DI_CMP
},
28004 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v16qi3
,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB
, EQ
, (int)MULTI_ARG_2_QI_CMP
},
28005 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v16qi3
,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB
, NE
, (int)MULTI_ARG_2_QI_CMP
},
28006 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v16qi3
,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB
, NE
, (int)MULTI_ARG_2_QI_CMP
},
28007 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB
, LTU
, (int)MULTI_ARG_2_QI_CMP
},
28008 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB
, LEU
, (int)MULTI_ARG_2_QI_CMP
},
28009 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB
, GTU
, (int)MULTI_ARG_2_QI_CMP
},
28010 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB
, GEU
, (int)MULTI_ARG_2_QI_CMP
},
28012 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v8hi3
, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW
, EQ
, (int)MULTI_ARG_2_HI_CMP
},
28013 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v8hi3
, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW
, NE
, (int)MULTI_ARG_2_HI_CMP
},
28014 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v8hi3
, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW
, NE
, (int)MULTI_ARG_2_HI_CMP
},
28015 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW
, LTU
, (int)MULTI_ARG_2_HI_CMP
},
28016 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW
, LEU
, (int)MULTI_ARG_2_HI_CMP
},
28017 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW
, GTU
, (int)MULTI_ARG_2_HI_CMP
},
28018 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW
, GEU
, (int)MULTI_ARG_2_HI_CMP
},
28020 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v4si3
, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD
, EQ
, (int)MULTI_ARG_2_SI_CMP
},
28021 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v4si3
, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD
, NE
, (int)MULTI_ARG_2_SI_CMP
},
28022 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v4si3
, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD
, NE
, (int)MULTI_ARG_2_SI_CMP
},
28023 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD
, LTU
, (int)MULTI_ARG_2_SI_CMP
},
28024 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD
, LEU
, (int)MULTI_ARG_2_SI_CMP
},
28025 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD
, GTU
, (int)MULTI_ARG_2_SI_CMP
},
28026 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD
, GEU
, (int)MULTI_ARG_2_SI_CMP
},
28028 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v2di3
, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ
, EQ
, (int)MULTI_ARG_2_DI_CMP
},
28029 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v2di3
, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
28030 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v2di3
, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
28031 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ
, LTU
, (int)MULTI_ARG_2_DI_CMP
},
28032 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ
, LEU
, (int)MULTI_ARG_2_DI_CMP
},
28033 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ
, GTU
, (int)MULTI_ARG_2_DI_CMP
},
28034 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ
, GEU
, (int)MULTI_ARG_2_DI_CMP
},
28036 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_QI_TF
},
28037 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_HI_TF
},
28038 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_SI_TF
},
28039 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_DI_TF
},
28040 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_QI_TF
},
28041 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_HI_TF
},
28042 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_SI_TF
},
28043 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_DI_TF
},
28045 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_QI_TF
},
28046 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_HI_TF
},
28047 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_SI_TF
},
28048 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_DI_TF
},
28049 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_QI_TF
},
28050 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_HI_TF
},
28051 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_SI_TF
},
28052 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_DI_TF
},
28054 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v2df3
, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD
, UNKNOWN
, (int)MULTI_ARG_4_DF2_DI_I
},
28055 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v4sf3
, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS
, UNKNOWN
, (int)MULTI_ARG_4_SF2_SI_I
},
28056 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v4df3
, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256
, UNKNOWN
, (int)MULTI_ARG_4_DF2_DI_I1
},
28057 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v8sf3
, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256
, UNKNOWN
, (int)MULTI_ARG_4_SF2_SI_I1
},
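
/* Illustration only (not used by the table machinery): with -mxop, a call
   such as

     __v16qi
     rotl_bytes (__v16qi x, __v16qi counts)
     {
       return __builtin_ia32_vprotb (x, counts);
     }

   is looked up in bdesc_multi_arg by its IX86_BUILTIN_VPROTB code and
   expanded by ix86_expand_multi_arg_builtin () onto the insn pattern named
   by CODE_FOR_xop_vrotlv16qi3; the MULTI_ARG_2_QI flag selects the
   two-operand V16QI signature.  */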

/* TM vector builtins.  */

/* Reuse the existing x86-specific `struct builtin_description' because
   we're lazy.  Add casts to make them fit.  */
static const struct builtin_description bdesc_tm[] =
{
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WM64", (enum ix86_builtins) BUILT_IN_TM_STORE_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaRM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaWM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaRM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RfWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WM128", (enum ix86_builtins) BUILT_IN_TM_STORE_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaRM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaWM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaRM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RfWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WM256", (enum ix86_builtins) BUILT_IN_TM_STORE_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaRM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaWM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaRM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RfWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },

  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_LM64", (enum ix86_builtins) BUILT_IN_TM_LOG_M64, UNKNOWN, VOID_FTYPE_PCVOID },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_LM128", (enum ix86_builtins) BUILT_IN_TM_LOG_M128, UNKNOWN, VOID_FTYPE_PCVOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_LM256", (enum ix86_builtins) BUILT_IN_TM_LOG_M256, UNKNOWN, VOID_FTYPE_PCVOID },
};
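
/* Note on naming: each bdesc_tm entry above is registered under its
   "__builtin__ITM_*" spelling, and ix86_init_tm_builtins () below also
   passes d->name + strlen ("__builtin_") as the library name, so e.g.
   "__builtin__ITM_WM64" doubles as the libitm entry point "_ITM_WM64".  */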

/* TM callbacks.  */

/* Return the builtin decl needed to load a vector of TYPE.  */

static tree
ix86_builtin_tm_load (tree type)
{
  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      switch (tree_low_cst (TYPE_SIZE (type), 1))
	{
	case 64:
	  return builtin_decl_explicit (BUILT_IN_TM_LOAD_M64);
	case 128:
	  return builtin_decl_explicit (BUILT_IN_TM_LOAD_M128);
	case 256:
	  return builtin_decl_explicit (BUILT_IN_TM_LOAD_M256);
	}
    }
  return NULL_TREE;
}

/* Return the builtin decl needed to store a vector of TYPE.  */

static tree
ix86_builtin_tm_store (tree type)
{
  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      switch (tree_low_cst (TYPE_SIZE (type), 1))
	{
	case 64:
	  return builtin_decl_explicit (BUILT_IN_TM_STORE_M64);
	case 128:
	  return builtin_decl_explicit (BUILT_IN_TM_STORE_M128);
	case 256:
	  return builtin_decl_explicit (BUILT_IN_TM_STORE_M256);
	}
    }
  return NULL_TREE;
}
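
/* For example (hypothetical caller): given
     typedef int v4si __attribute__ ((vector_size (16)));
   TYPE_SIZE is 128 bits, so ix86_builtin_tm_load () above returns the
   decl registered for BUILT_IN_TM_LOAD_M128.  */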

/* Initialize the transactional memory vector load/store builtins.  */

static void
ix86_init_tm_builtins (void)
{
  enum ix86_builtin_func_type ftype;
  const struct builtin_description *d;
  size_t i;
  tree decl;
  tree attrs_load, attrs_type_load, attrs_store, attrs_type_store;
  tree attrs_log, attrs_type_log;

  if (!flag_tm)
    return;

  /* If there are no builtins defined, we must be compiling in a
     language without trans-mem support.  */
  if (!builtin_decl_explicit_p (BUILT_IN_TM_LOAD_1))
    return;

  /* Use whatever attributes a normal TM load has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_LOAD_1);
  attrs_load = DECL_ATTRIBUTES (decl);
  attrs_type_load = TYPE_ATTRIBUTES (TREE_TYPE (decl));
  /* Use whatever attributes a normal TM store has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_STORE_1);
  attrs_store = DECL_ATTRIBUTES (decl);
  attrs_type_store = TYPE_ATTRIBUTES (TREE_TYPE (decl));
  /* Use whatever attributes a normal TM log has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_LOG);
  attrs_log = DECL_ATTRIBUTES (decl);
  attrs_type_log = TYPE_ATTRIBUTES (TREE_TYPE (decl));

  for (i = 0, d = bdesc_tm;
       i < ARRAY_SIZE (bdesc_tm);
       i++, d++)
    {
      if ((d->mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type, attrs, attrs_type;
	  enum built_in_function code = (enum built_in_function) d->code;

	  ftype = (enum ix86_builtin_func_type) d->flag;
	  type = ix86_get_builtin_func_type (ftype);

	  if (BUILTIN_TM_LOAD_P (code))
	    {
	      attrs = attrs_load;
	      attrs_type = attrs_type_load;
	    }
	  else if (BUILTIN_TM_STORE_P (code))
	    {
	      attrs = attrs_store;
	      attrs_type = attrs_type_store;
	    }
	  else
	    {
	      attrs = attrs_log;
	      attrs_type = attrs_type_log;
	    }
	  decl = add_builtin_function (d->name, type, code, BUILT_IN_NORMAL,
				       /* The builtin without the prefix for
					  calling it directly.  */
				       d->name + strlen ("__builtin_"),
				       attrs);
	  /* add_builtin_function () will set the DECL_ATTRIBUTES, now
	     set the TYPE_ATTRIBUTES.  */
	  decl_attributes (&TREE_TYPE (decl), attrs_type, ATTR_FLAG_BUILT_IN);

	  set_builtin_decl (code, decl, false);
	}
    }
}
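
/* Sketch of the net effect of the loop above for one SSE entry; this is
   roughly the pair of calls it performs:

     decl = add_builtin_function ("__builtin__ITM_RM128", type,
				  BUILT_IN_TM_LOAD_M128, BUILT_IN_NORMAL,
				  "_ITM_RM128", attrs_load);
     set_builtin_decl (BUILT_IN_TM_LOAD_M128, decl, false);

   so later TM lowering can emit calls to the libitm routine.  */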

/* Set up all the MMX/SSE builtins, even builtins for instructions that are
   not in the current target ISA, to allow the user to compile particular
   modules with different target specific options that differ from the
   command line options.  */
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
	ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
	ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
	ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
	ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
	ftype = INT_FTYPE_V2DF_V2DF;
      else
	ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* SSE */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
	       UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
	       "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
	       IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
	       VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
	       VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
			    VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3 */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
	       VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
	       VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
		     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
		     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
		     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* RDRND */
  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
	       INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
	       INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
	       "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
	       IX86_BUILTIN_RDRAND64_STEP);

  /* AVX2 */
  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv2df",
	       V2DF_FTYPE_V2DF_PCDOUBLE_V4SI_V2DF_INT,
	       IX86_BUILTIN_GATHERSIV2DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4df",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V4SI_V4DF_INT,
	       IX86_BUILTIN_GATHERSIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv2df",
	       V2DF_FTYPE_V2DF_PCDOUBLE_V2DI_V2DF_INT,
	       IX86_BUILTIN_GATHERDIV2DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4df",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V4DI_V4DF_INT,
	       IX86_BUILTIN_GATHERDIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4sf",
	       V4SF_FTYPE_V4SF_PCFLOAT_V4SI_V4SF_INT,
	       IX86_BUILTIN_GATHERSIV4SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv8sf",
	       V8SF_FTYPE_V8SF_PCFLOAT_V8SI_V8SF_INT,
	       IX86_BUILTIN_GATHERSIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4sf",
	       V4SF_FTYPE_V4SF_PCFLOAT_V2DI_V4SF_INT,
	       IX86_BUILTIN_GATHERDIV4SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4sf256",
	       V4SF_FTYPE_V4SF_PCFLOAT_V4DI_V4SF_INT,
	       IX86_BUILTIN_GATHERDIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv2di",
	       V2DI_FTYPE_V2DI_PCINT64_V4SI_V2DI_INT,
	       IX86_BUILTIN_GATHERSIV2DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4di",
	       V4DI_FTYPE_V4DI_PCINT64_V4SI_V4DI_INT,
	       IX86_BUILTIN_GATHERSIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv2di",
	       V2DI_FTYPE_V2DI_PCINT64_V2DI_V2DI_INT,
	       IX86_BUILTIN_GATHERDIV2DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4di",
	       V4DI_FTYPE_V4DI_PCINT64_V4DI_V4DI_INT,
	       IX86_BUILTIN_GATHERDIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4si",
	       V4SI_FTYPE_V4SI_PCINT_V4SI_V4SI_INT,
	       IX86_BUILTIN_GATHERSIV4SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv8si",
	       V8SI_FTYPE_V8SI_PCINT_V8SI_V8SI_INT,
	       IX86_BUILTIN_GATHERSIV8SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4si",
	       V4SI_FTYPE_V4SI_PCINT_V2DI_V4SI_INT,
	       IX86_BUILTIN_GATHERDIV4SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4si256",
	       V4SI_FTYPE_V4SI_PCINT_V4DI_V4SI_INT,
	       IX86_BUILTIN_GATHERDIV8SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltsiv4df ",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V8SI_V4DF_INT,
	       IX86_BUILTIN_GATHERALTSIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltdiv4sf256 ",
	       V8SF_FTYPE_V8SF_PCFLOAT_V4DI_V8SF_INT,
	       IX86_BUILTIN_GATHERALTDIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltsiv4di ",
	       V4DI_FTYPE_V4DI_PCINT64_V8SI_V4DI_INT,
	       IX86_BUILTIN_GATHERALTSIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltdiv4si256 ",
	       V8SI_FTYPE_V8SI_PCINT_V4DI_V8SI_INT,
	       IX86_BUILTIN_GATHERALTDIV8SI);

  /* RTM */
  def_builtin (OPTION_MASK_ISA_RTM, "__builtin_ia32_xabort",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_XABORT);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
		     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
		     V4HI_FTYPE_HI_HI_HI_HI,
		     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
		     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
		     IX86_BUILTIN_VEC_INIT_V8QI);

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
		     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
		     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
		     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
		     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
		     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_ext_v4hi",
		     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
		     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
		     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
		     "__builtin_ia32_vec_set_v2di",
		     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
		     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
		     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
		     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_set_v4hi",
		     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
		     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);

  /* RDSEED */
  def_builtin (OPTION_MASK_ISA_RDSEED, "__builtin_ia32_rdseed_hi_step",
	       INT_FTYPE_PUSHORT, IX86_BUILTIN_RDSEED16_STEP);
  def_builtin (OPTION_MASK_ISA_RDSEED, "__builtin_ia32_rdseed_si_step",
	       INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDSEED32_STEP);
  def_builtin (OPTION_MASK_ISA_RDSEED | OPTION_MASK_ISA_64BIT,
	       "__builtin_ia32_rdseed_di_step",
	       INT_FTYPE_PULONGLONG, IX86_BUILTIN_RDSEED64_STEP);

  /* ADCX */
  def_builtin (0, "__builtin_ia32_addcarryx_u32",
	       UCHAR_FTYPE_UCHAR_UINT_UINT_PUNSIGNED, IX86_BUILTIN_ADDCARRYX32);
  def_builtin (OPTION_MASK_ISA_64BIT,
	       "__builtin_ia32_addcarryx_u64",
	       UCHAR_FTYPE_UCHAR_ULONGLONG_ULONGLONG_PULONGLONG,
	       IX86_BUILTIN_ADDCARRYX64);

  /* Add FMA4 multi-arg builtin instructions.  */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}
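
/* Usage sketch (outside this file): the gather builtins registered above
   back the AVX2 intrinsics in avx2intrin.h; e.g. _mm256_i32gather_pd ()
   is believed to expand to __builtin_ia32_gathersiv4df, whose
   V4DF_FTYPE_V4DF_PCDOUBLE_V4SI_V4DF_INT signature carries the source,
   base pointer, index vector, mask and scale operands.  */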

/* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL
   to return a pointer to VERSION_DECL if the outcome of the expression
   formed by PREDICATE_CHAIN is true.  This function will be called during
   version dispatch to decide which function version to execute.  It returns
   the basic block at the end, to which more conditions can be added.  */

static basic_block
add_condition_to_bb (tree function_decl, tree version_decl,
		     tree predicate_chain, basic_block new_bb)
{
  gimple return_stmt;
  tree convert_expr, result_var;
  gimple convert_stmt;
  gimple call_cond_stmt;
  gimple if_else_stmt;

  basic_block bb1, bb2, bb3;
  edge e12, e23;

  tree cond_var, and_expr_var = NULL_TREE;
  gimple_seq gseq;

  tree predicate_decl, predicate_arg;

  push_cfun (DECL_STRUCT_FUNCTION (function_decl));

  gcc_assert (new_bb != NULL);
  gseq = bb_seq (new_bb);

  convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
			 build_fold_addr_expr (version_decl));
  result_var = create_tmp_var (ptr_type_node, NULL);
  convert_stmt = gimple_build_assign (result_var, convert_expr);
  return_stmt = gimple_build_return (result_var);

  if (predicate_chain == NULL_TREE)
    {
      gimple_seq_add_stmt (&gseq, convert_stmt);
      gimple_seq_add_stmt (&gseq, return_stmt);
      set_bb_seq (new_bb, gseq);
      gimple_set_bb (convert_stmt, new_bb);
      gimple_set_bb (return_stmt, new_bb);
      pop_cfun ();
      return new_bb;
    }

  while (predicate_chain != NULL)
    {
      cond_var = create_tmp_var (integer_type_node, NULL);
      predicate_decl = TREE_PURPOSE (predicate_chain);
      predicate_arg = TREE_VALUE (predicate_chain);
      call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
      gimple_call_set_lhs (call_cond_stmt, cond_var);

      gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
      gimple_set_bb (call_cond_stmt, new_bb);
      gimple_seq_add_stmt (&gseq, call_cond_stmt);

      predicate_chain = TREE_CHAIN (predicate_chain);

      if (and_expr_var == NULL)
	and_expr_var = cond_var;
      else
	{
	  gimple assign_stmt;
	  /* Use MIN_EXPR to check if any integer is zero.
	     and_expr_var = min_expr <cond_var, and_expr_var>  */
	  assign_stmt = gimple_build_assign (and_expr_var,
					     build2 (MIN_EXPR, integer_type_node,
						     cond_var, and_expr_var));

	  gimple_set_block (assign_stmt, DECL_INITIAL (function_decl));
	  gimple_set_bb (assign_stmt, new_bb);
	  gimple_seq_add_stmt (&gseq, assign_stmt);
	}
    }

  if_else_stmt = gimple_build_cond (GT_EXPR, and_expr_var,
				    integer_zero_node,
				    NULL_TREE, NULL_TREE);
  gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (if_else_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, if_else_stmt);

  gimple_seq_add_stmt (&gseq, convert_stmt);
  gimple_seq_add_stmt (&gseq, return_stmt);
  set_bb_seq (new_bb, gseq);

  bb1 = new_bb;
  e12 = split_block (bb1, if_else_stmt);
  bb2 = e12->dest;
  e12->flags &= ~EDGE_FALLTHRU;
  e12->flags |= EDGE_TRUE_VALUE;

  e23 = split_block (bb2, return_stmt);

  gimple_set_bb (convert_stmt, bb2);
  gimple_set_bb (return_stmt, bb2);

  bb3 = e23->dest;
  make_edge (bb1, bb3, EDGE_FALSE_VALUE);

  remove_edge (e23);
  make_edge (bb2, EXIT_BLOCK_PTR, 0);

  pop_cfun ();

  return bb3;
}
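
/* Shape of the code built by add_condition_to_bb (), as pseudo-C for one
   version guarded by two predicates P1 and P2:

     c1 = P1 (arg1);
     c2 = P2 (arg2);
     and = MIN (c2, c1);
     if (and > 0)
       return (void *) &version_decl;
     ... otherwise control falls through to the next condition block.

   MIN_EXPR makes the conjunction true only if every predicate returned
   non-zero.  */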

/* This parses the attribute arguments to target in DECL and determines
   the right builtin to use to match the platform specification.
   It returns the priority value for this version decl.  If PREDICATE_LIST
   is not NULL, it stores the list of cpu features that need to be checked
   before dispatching this function.  */

static unsigned int
get_builtin_code_for_version (tree decl, tree *predicate_list)
{
  tree attrs;
  struct cl_target_option cur_target;
  tree target_node;
  struct cl_target_option *new_target;
  const char *arg_str = NULL;
  const char *attrs_str = NULL;
  char *tok_str = NULL;
  char *token;

  /* Priority of i386 features, greater value is higher priority.  This is
     used to decide the order in which function dispatch must happen.  For
     instance, a version specialized for SSE4.2 should be checked for dispatch
     before a version for SSE3, as SSE4.2 implies SSE3.  */
  enum feature_priority
  {
    P_ZERO = 0,
    P_MMX,
    P_SSE,
    P_SSE2,
    P_SSE3,
    P_SSSE3,
    P_PROC_SSSE3,
    P_SSE4_a,
    P_PROC_SSE4_a,
    P_SSE4_1,
    P_SSE4_2,
    P_PROC_SSE4_2,
    P_POPCNT,
    P_AVX,
    P_FMA,
    P_PROC_FMA
  };

  enum feature_priority priority = P_ZERO;

  /* These are the target attribute strings for which a dispatcher is
     available, from fold_builtin_cpu.  */

  static struct _feature_list
    {
      const char *const name;
      const enum feature_priority priority;
    }
  const feature_list[] =
    {
      {"mmx", P_MMX},
      {"sse", P_SSE},
      {"sse2", P_SSE2},
      {"sse3", P_SSE3},
      {"sse4a", P_SSE4_a},
      {"ssse3", P_SSSE3},
      {"sse4.1", P_SSE4_1},
      {"sse4.2", P_SSE4_2},
      {"popcnt", P_POPCNT},
      {"avx", P_AVX},
      {"fma", P_FMA}
    };

  static unsigned int NUM_FEATURES
    = sizeof (feature_list) / sizeof (struct _feature_list);

  unsigned int i;

  tree predicate_chain = NULL_TREE;
  tree predicate_decl, predicate_arg;

  attrs = lookup_attribute ("target", DECL_ATTRIBUTES (decl));
  gcc_assert (attrs != NULL);

  attrs = TREE_VALUE (TREE_VALUE (attrs));

  gcc_assert (TREE_CODE (attrs) == STRING_CST);
  attrs_str = TREE_STRING_POINTER (attrs);

  /* Handle arch= if specified.  For priority, set it to be 1 more than
     the best instruction set the processor can handle.  For instance, if
     there is a version for atom and a version for ssse3 (the highest ISA
     priority for atom), the atom version must be checked for dispatch
     before the ssse3 version.  */
  if (strstr (attrs_str, "arch=") != NULL)
    {
      cl_target_option_save (&cur_target, &global_options);
      target_node = ix86_valid_target_attribute_tree (attrs);

      gcc_assert (target_node);
      new_target = TREE_TARGET_OPTION (target_node);
      gcc_assert (new_target);

      if (new_target->arch_specified && new_target->arch > 0)
	{
	  switch (new_target->arch)
	    {
	    case PROCESSOR_CORE2:
	      arg_str = "core2";
	      priority = P_PROC_SSSE3;
	      break;
	    case PROCESSOR_COREI7:
	      arg_str = "corei7";
	      priority = P_PROC_SSE4_2;
	      break;
	    case PROCESSOR_ATOM:
	      arg_str = "atom";
	      priority = P_PROC_SSSE3;
	      break;
	    case PROCESSOR_AMDFAM10:
	      arg_str = "amdfam10h";
	      priority = P_PROC_SSE4_a;
	      break;
	    case PROCESSOR_BDVER1:
	      arg_str = "bdver1";
	      priority = P_PROC_FMA;
	      break;
	    case PROCESSOR_BDVER2:
	      arg_str = "bdver2";
	      priority = P_PROC_FMA;
	      break;
	    }
	}

      cl_target_option_restore (&global_options, &cur_target);

      if (predicate_list && arg_str == NULL)
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "No dispatcher found for the versioning attributes");
	  return 0;
	}

      if (predicate_list)
	{
	  predicate_decl = ix86_builtins [(int) IX86_BUILTIN_CPU_IS];
	  /* For a C string literal the length includes the trailing NULL.  */
	  predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
	  predicate_chain = tree_cons (predicate_decl, predicate_arg,
				       predicate_chain);
	}
    }

  /* Process feature name.  */
  tok_str = (char *) xmalloc (strlen (attrs_str) + 1);
  strcpy (tok_str, attrs_str);
  token = strtok (tok_str, ",");
  predicate_decl = ix86_builtins [(int) IX86_BUILTIN_CPU_SUPPORTS];

  while (token != NULL)
    {
      /* Do not process "arch="  */
      if (strncmp (token, "arch=", 5) == 0)
	{
	  token = strtok (NULL, ",");
	  continue;
	}
      for (i = 0; i < NUM_FEATURES; ++i)
	{
	  if (strcmp (token, feature_list[i].name) == 0)
	    {
	      if (predicate_list)
		{
		  predicate_arg = build_string_literal (
				    strlen (feature_list[i].name) + 1,
				    feature_list[i].name);
		  predicate_chain = tree_cons (predicate_decl, predicate_arg,
					       predicate_chain);
		}
	      /* Find the maximum priority feature.  */
	      if (feature_list[i].priority > priority)
		priority = feature_list[i].priority;

	      break;
	    }
	}
      if (predicate_list && i == NUM_FEATURES)
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "No dispatcher found for %s", token);
	  return 0;
	}
      token = strtok (NULL, ",");
    }
  free (tok_str);

  if (predicate_list && predicate_chain == NULL_TREE)
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"No dispatcher found for the versioning attributes : %s",
		attrs_str);
      return 0;
    }
  else if (predicate_list)
    {
      predicate_chain = nreverse (predicate_chain);
      *predicate_list = predicate_chain;
    }

  return priority;
}
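
/* Worked example (hypothetical attribute): for
     __attribute__ ((target ("arch=corei7,popcnt")))
   the code above builds a predicate chain equivalent to
     __builtin_cpu_is ("corei7") && __builtin_cpu_supports ("popcnt")
   and returns the larger of the arch and feature priorities.  */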

/* This compares the priority of target features in function DECL1
   and DECL2.  It returns positive value if DECL1 is higher priority,
   negative value if DECL2 is higher priority and 0 if they are the
   same.  */

static int
ix86_compare_version_priority (tree decl1, tree decl2)
{
  unsigned int priority1 = 0;
  unsigned int priority2 = 0;

  if (lookup_attribute ("target", DECL_ATTRIBUTES (decl1)) != NULL)
    priority1 = get_builtin_code_for_version (decl1, NULL);

  if (lookup_attribute ("target", DECL_ATTRIBUTES (decl2)) != NULL)
    priority2 = get_builtin_code_for_version (decl2, NULL);

  return (int)priority1 - (int)priority2;
}

/* V1 and V2 point to function versions with different priorities
   based on the target ISA.  This function compares their priorities.  */

static int
feature_compare (const void *v1, const void *v2)
{
  typedef struct _function_version_info
    {
      tree version_decl;
      tree predicate_chain;
      unsigned int dispatch_priority;
    } function_version_info;

  const function_version_info c1 = *(const function_version_info *)v1;
  const function_version_info c2 = *(const function_version_info *)v2;
  return (c2.dispatch_priority - c1.dispatch_priority);
}

/* This function generates the dispatch function for
   multi-versioned functions.  DISPATCH_DECL is the function which will
   contain the dispatch logic.  FNDECLS are the function choices for
   dispatch, and is a tree chain.  EMPTY_BB is the basic block pointer
   in DISPATCH_DECL in which the dispatch code is generated.  */

static int
dispatch_function_versions (tree dispatch_decl,
			    void *fndecls_p,
			    basic_block *empty_bb)
{
  tree default_decl;
  gimple ifunc_cpu_init_stmt;
  gimple_seq gseq;
  int ix;
  tree ele;
  vec<tree> *fndecls;
  unsigned int num_versions = 0;
  unsigned int actual_versions = 0;
  unsigned int i;

  struct _function_version_info
    {
      tree version_decl;
      tree predicate_chain;
      unsigned int dispatch_priority;
    }*function_version_info;

  gcc_assert (dispatch_decl != NULL
	      && fndecls_p != NULL
	      && empty_bb != NULL);

  /*fndecls_p is actually a vector.  */
  fndecls = static_cast<vec<tree> *> (fndecls_p);

  /* At least one more version other than the default.  */
  num_versions = fndecls->length ();
  gcc_assert (num_versions >= 2);

  function_version_info = (struct _function_version_info *)
    XNEWVEC (struct _function_version_info, (num_versions - 1));

  /* The first version in the vector is the default decl.  */
  default_decl = (*fndecls)[0];

  push_cfun (DECL_STRUCT_FUNCTION (dispatch_decl));

  gseq = bb_seq (*empty_bb);
  /* Function version dispatch is via IFUNC.  IFUNC resolvers fire before
     constructors, so explicitly call __builtin_cpu_init here.  */
  ifunc_cpu_init_stmt = gimple_build_call_vec (
		     ix86_builtins [(int) IX86_BUILTIN_CPU_INIT], vNULL);
  gimple_seq_add_stmt (&gseq, ifunc_cpu_init_stmt);
  gimple_set_bb (ifunc_cpu_init_stmt, *empty_bb);
  set_bb_seq (*empty_bb, gseq);

  pop_cfun ();

  for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
    {
      tree version_decl = ele;
      tree predicate_chain = NULL_TREE;
      unsigned int priority;
      /* Get attribute string, parse it and find the right predicate decl.
	 The predicate function could be a lengthy combination of many
	 features, like arch-type and various isa-variants.  */
      priority = get_builtin_code_for_version (version_decl,
					       &predicate_chain);

      if (predicate_chain == NULL_TREE)
	continue;

      actual_versions++;
      function_version_info [ix - 1].version_decl = version_decl;
      function_version_info [ix - 1].predicate_chain = predicate_chain;
      function_version_info [ix - 1].dispatch_priority = priority;
    }

  /* Sort the versions according to descending order of dispatch priority.  The
     priority is based on the ISA.  This is not a perfect solution.  There
     could still be ambiguity.  If more than one function version is suitable
     to execute, which one should be dispatched?  In future, allow the user
     to specify a dispatch priority next to the version.  */
  qsort (function_version_info, actual_versions,
	 sizeof (struct _function_version_info), feature_compare);

  for (i = 0; i < actual_versions; ++i)
    *empty_bb = add_condition_to_bb (dispatch_decl,
				     function_version_info[i].version_decl,
				     function_version_info[i].predicate_chain,
				     *empty_bb);

  /* dispatch default version at the end.  */
  *empty_bb = add_condition_to_bb (dispatch_decl, default_decl,
				   NULL, *empty_bb);

  free (function_version_info);
  return 0;
}
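
/* The resolver body assembled by the calls above is, schematically,

     void *resolver (void)
     {
       __builtin_cpu_init ();
       if (<predicates of the highest-priority version>)
	 return &version_1;
       ...
       return &default_version;
     }

   feature_compare () sorts in descending priority order, so the most
   specific versions are tested first.  */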

/* Comparator function to be used in qsort routine to sort attribute
   specification strings to "target".  */

static int
attr_strcmp (const void *v1, const void *v2)
{
  const char *c1 = *(char *const*)v1;
  const char *c2 = *(char *const*)v2;
  return strcmp (c1, c2);
}

/* ARGLIST is the argument to target attribute.  This function tokenizes
   the comma separated arguments, sorts them and returns a string which
   is a unique identifier for the comma separated arguments.  It also
   replaces non-identifier characters "=,-" with "_".  */

static char *
sorted_attr_string (tree arglist)
{
  tree arg;
  size_t str_len_sum = 0;
  char **args = NULL;
  char *attr_str, *ret_str;
  char *attr = NULL;
  unsigned int argnum = 1;
  unsigned int i;

  for (arg = arglist; arg; arg = TREE_CHAIN (arg))
    {
      const char *str = TREE_STRING_POINTER (TREE_VALUE (arg));
      size_t len = strlen (str);
      str_len_sum += len + 1;
      if (arg != arglist)
	argnum++;
      for (i = 0; i < strlen (str); i++)
	if (str[i] == ',')
	  argnum++;
    }

  attr_str = XNEWVEC (char, str_len_sum);
  str_len_sum = 0;
  for (arg = arglist; arg; arg = TREE_CHAIN (arg))
    {
      const char *str = TREE_STRING_POINTER (TREE_VALUE (arg));
      size_t len = strlen (str);
      memcpy (attr_str + str_len_sum, str, len);
      attr_str[str_len_sum + len] = TREE_CHAIN (arg) ? ',' : '\0';
      str_len_sum += len + 1;
    }

  /* Replace "=,-" with "_".  */
  for (i = 0; i < strlen (attr_str); i++)
    if (attr_str[i] == '=' || attr_str[i]== '-')
      attr_str[i] = '_';

  if (argnum == 1)
    return attr_str;

  args = XNEWVEC (char *, argnum);

  i = 0;
  attr = strtok (attr_str, ",");
  while (attr != NULL)
    {
      args[i] = attr;
      i++;
      attr = strtok (NULL, ",");
    }

  qsort (args, argnum, sizeof (char *), attr_strcmp);

  ret_str = XNEWVEC (char, str_len_sum);
  str_len_sum = 0;
  for (i = 0; i < argnum; i++)
    {
      size_t len = strlen (args[i]);
      memcpy (ret_str + str_len_sum, args[i], len);
      ret_str[str_len_sum + len] = i < argnum - 1 ? '_' : '\0';
      str_len_sum += len + 1;
    }

  XDELETEVEC (args);
  XDELETEVEC (attr_str);
  return ret_str;
}
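
/* Example (hypothetical input): for target ("avx,arch=corei7") the "="
   is first rewritten to "_", giving tokens "avx" and "arch_corei7";
   after sorting and joining, the result is "arch_corei7_avx", the same
   identifier regardless of the order the user wrote the arguments.  */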

/* This function changes the assembler name for functions that are
   versions.  If DECL is a function version and has a "target"
   attribute, it appends the attribute string to its assembler name.  */

static tree
ix86_mangle_function_version_assembler_name (tree decl, tree id)
{
  tree version_attr;
  const char *orig_name, *version_string;
  char *attr_str, *assembler_name;

  if (DECL_DECLARED_INLINE_P (decl)
      && lookup_attribute ("gnu_inline",
			   DECL_ATTRIBUTES (decl)))
    error_at (DECL_SOURCE_LOCATION (decl),
	      "Function versions cannot be marked as gnu_inline,"
	      " bodies have to be generated");

  if (DECL_VIRTUAL_P (decl)
      || DECL_VINDEX (decl))
    error_at (DECL_SOURCE_LOCATION (decl),
	      "Virtual function versioning not supported\n");

  version_attr = lookup_attribute ("target", DECL_ATTRIBUTES (decl));

  /* target attribute string is NULL for default functions.  */
  if (version_attr == NULL_TREE)
    return id;

  orig_name = IDENTIFIER_POINTER (id);
  version_string
    = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (version_attr)));

  if (strcmp (version_string, "default") == 0)
    return id;

  attr_str = sorted_attr_string (TREE_VALUE (version_attr));
  assembler_name = XNEWVEC (char, strlen (orig_name) + strlen (attr_str) + 2);

  sprintf (assembler_name, "%s.%s", orig_name, attr_str);

  /* Allow assembler name to be modified if already set.  */
  if (DECL_ASSEMBLER_NAME_SET_P (decl))
    SET_DECL_RTL (decl, NULL);

  tree ret = get_identifier (assembler_name);
  XDELETEVEC (attr_str);
  XDELETEVEC (assembler_name);
  return ret;
}
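
/* Example: a version declared in C++ as
     __attribute__ ((target ("sse4.2"))) int foo (void);
   keeps its mangled base name and gains the sorted attribute string as
   a suffix, e.g. "_Z3foov.sse4.2", so every version has a distinct
   assembler-level symbol.  */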

/* This function returns true if FN1 and FN2 are versions of the same function,
   that is, the target strings of the function decls are different.  This
   assumes that FN1 and FN2 have the same signature.  */

static bool
ix86_function_versions (tree fn1, tree fn2)
{
  tree attr1, attr2;
  char *target1, *target2;
  bool result = false;

  if (TREE_CODE (fn1) != FUNCTION_DECL
      || TREE_CODE (fn2) != FUNCTION_DECL)
    return false;

  attr1 = lookup_attribute ("target", DECL_ATTRIBUTES (fn1));
  attr2 = lookup_attribute ("target", DECL_ATTRIBUTES (fn2));

  /* At least one function decl should have the target attribute specified.  */
  if (attr1 == NULL_TREE && attr2 == NULL_TREE)
    return false;

  /* Diagnose missing target attribute if one of the decls is already
     multi-versioned.  */
  if (attr1 == NULL_TREE || attr2 == NULL_TREE)
    {
      if (DECL_FUNCTION_VERSIONED (fn1) || DECL_FUNCTION_VERSIONED (fn2))
	{
	  if (attr2 != NULL_TREE)
	    {
	      tree tem = fn1;
	      fn1 = fn2;
	      fn2 = tem;
	      attr1 = attr2;
	    }
	  error_at (DECL_SOURCE_LOCATION (fn2),
		    "missing %<target%> attribute for multi-versioned %D",
		    fn2);
	  error_at (DECL_SOURCE_LOCATION (fn1),
		    "previous declaration of %D", fn1);
	  /* Prevent diagnosing of the same error multiple times.  */
	  DECL_ATTRIBUTES (fn2)
	    = tree_cons (get_identifier ("target"),
			 copy_node (TREE_VALUE (attr1)),
			 DECL_ATTRIBUTES (fn2));
	}
      return false;
    }

  target1 = sorted_attr_string (TREE_VALUE (attr1));
  target2 = sorted_attr_string (TREE_VALUE (attr2));

  /* The sorted target strings must be different for fn1 and fn2
     to be versions.  */
  if (strcmp (target1, target2) == 0)
    result = false;
  else
    result = true;

  XDELETEVEC (target1);
  XDELETEVEC (target2);

  return result;
}

static tree
ix86_mangle_decl_assembler_name (tree decl, tree id)
{
  /* For function version, add the target suffix to the assembler name.  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_FUNCTION_VERSIONED (decl))
    id = ix86_mangle_function_version_assembler_name (decl, id);
#ifdef SUBTARGET_MANGLE_DECL_ASSEMBLER_NAME
  id = SUBTARGET_MANGLE_DECL_ASSEMBLER_NAME (decl, id);
#endif

  return id;
}

/* Return a new name by appending SUFFIX to the DECL name.  If make_unique
   is true, append the full path name of the source file.  */

static char *
make_name (tree decl, const char *suffix, bool make_unique)
{
  char *global_var_name;
  size_t name_len;
  const char *name;
  const char *unique_name = NULL;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Get a unique name that can be used globally without any chances
     of collision at link time.  */
  if (make_unique)
    unique_name = IDENTIFIER_POINTER (get_file_function_name ("\0"));

  name_len = strlen (name) + strlen (suffix) + 2;

  if (make_unique)
    name_len += strlen (unique_name) + 1;
  global_var_name = XNEWVEC (char, name_len);

  /* Use '.' to concatenate names as it is demangler friendly.  */
  if (make_unique)
    snprintf (global_var_name, name_len, "%s.%s.%s", name, unique_name,
	      suffix);
  else
    snprintf (global_var_name, name_len, "%s.%s", name, suffix);

  return global_var_name;
}

#if defined (ASM_OUTPUT_TYPE_DIRECTIVE) && HAVE_GNU_INDIRECT_FUNCTION

/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Return the decl created.  */

static tree
make_dispatcher_decl (const tree decl)
{
  tree func_decl;
  char *func_name;
  tree fn_type, func_type;
  bool is_uniq = false;

  if (TREE_PUBLIC (decl) == 0)
    is_uniq = true;

  func_name = make_name (decl, "ifunc", is_uniq);

  fn_type = TREE_TYPE (decl);
  func_type = build_function_type (TREE_TYPE (fn_type),
				   TYPE_ARG_TYPES (fn_type));

  func_decl = build_fn_decl (func_name, func_type);
  XDELETEVEC (func_name);
  TREE_USED (func_decl) = 1;
  DECL_CONTEXT (func_decl) = NULL_TREE;
  DECL_INITIAL (func_decl) = error_mark_node;
  DECL_ARTIFICIAL (func_decl) = 1;
  /* Mark this func as external, the resolver will flip it again if
     it gets generated.  */
  DECL_EXTERNAL (func_decl) = 1;
  /* IFUNCs have to be externally visible.  */
  TREE_PUBLIC (func_decl) = 1;

  return func_decl;
}

#endif

/* Returns true if decl is multi-versioned and DECL is the default function,
   that is it is not tagged with target specific optimization.  */

static bool
is_function_default_version (const tree decl)
{
  if (TREE_CODE (decl) != FUNCTION_DECL
      || !DECL_FUNCTION_VERSIONED (decl))
    return false;
  tree attr = lookup_attribute ("target", DECL_ATTRIBUTES (decl));
  gcc_assert (attr);
  attr = TREE_VALUE (TREE_VALUE (attr));
  return (TREE_CODE (attr) == STRING_CST
	  && strcmp (TREE_STRING_POINTER (attr), "default") == 0);
}

/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Returns the decl of the dispatcher function.  */

static tree
ix86_get_function_versions_dispatcher (void *decl)
{
  tree fn = (tree) decl;
  struct cgraph_node *node = NULL;
  struct cgraph_node *default_node = NULL;
  struct cgraph_function_version_info *node_v = NULL;
  struct cgraph_function_version_info *first_v = NULL;

  tree dispatch_decl = NULL;

#if defined (ASM_OUTPUT_TYPE_DIRECTIVE) && HAVE_GNU_INDIRECT_FUNCTION
  struct cgraph_function_version_info *it_v = NULL;
  struct cgraph_node *dispatcher_node = NULL;
  struct cgraph_function_version_info *dispatcher_version_info = NULL;
#endif

  struct cgraph_function_version_info *default_version_info = NULL;

  gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));

  node = cgraph_get_node (fn);
  gcc_assert (node != NULL);

  node_v = get_cgraph_node_version (node);
  gcc_assert (node_v != NULL);

  if (node_v->dispatcher_resolver != NULL)
    return node_v->dispatcher_resolver;

  /* Find the default version and make it the first node.  */
  first_v = node_v;
  /* Go to the beginning of the chain.  */
  while (first_v->prev != NULL)
    first_v = first_v->prev;
  default_version_info = first_v;
  while (default_version_info != NULL)
    {
      if (is_function_default_version
	    (default_version_info->this_node->symbol.decl))
	break;
      default_version_info = default_version_info->next;
    }

  /* If there is no default node, just return NULL.  */
  if (default_version_info == NULL)
    return NULL;

  /* Make default info the first node.  */
  if (first_v != default_version_info)
    {
      default_version_info->prev->next = default_version_info->next;
      if (default_version_info->next)
	default_version_info->next->prev = default_version_info->prev;
      first_v->prev = default_version_info;
      default_version_info->next = first_v;
      default_version_info->prev = NULL;
    }

  default_node = default_version_info->this_node;

#if defined (ASM_OUTPUT_TYPE_DIRECTIVE) && HAVE_GNU_INDIRECT_FUNCTION
  /* Right now, the dispatching is done via ifunc.  */
  dispatch_decl = make_dispatcher_decl (default_node->symbol.decl);

  dispatcher_node = cgraph_get_create_node (dispatch_decl);
  gcc_assert (dispatcher_node != NULL);
  dispatcher_node->dispatcher_function = 1;
  dispatcher_version_info
    = insert_new_cgraph_node_version (dispatcher_node);
  dispatcher_version_info->next = default_version_info;
  dispatcher_node->local.finalized = 1;

  /* Set the dispatcher for all the versions.  */
  it_v = default_version_info;
  while (it_v != NULL)
    {
      it_v->dispatcher_resolver = dispatch_decl;
      it_v = it_v->next;
    }
#else
  error_at (DECL_SOURCE_LOCATION (default_node->symbol.decl),
	    "multiversioning needs ifunc which is not supported "
	    "in this configuration");
#endif

  return dispatch_decl;
}
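
/* Net effect when ifunc is available: the front-end redirects calls to
   any version of the function to the single dispatch_decl returned
   here, and make_resolver_func () below ties that decl to its resolver
   via an ifunc ("...") attribute.  */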

/* Makes a function attribute of the form NAME(ARG_NAME) and chains
   it to CHAIN.  */

static tree
make_attribute (const char *name, const char *arg_name, tree chain)
{
  tree attr_name;
  tree attr_arg_name;
  tree attr_args;
  tree attr;

  attr_name = get_identifier (name);
  attr_arg_name = build_string (strlen (arg_name), arg_name);
  attr_args = tree_cons (NULL_TREE, attr_arg_name, NULL_TREE);
  attr = tree_cons (attr_name, attr_args, chain);
  return attr;
}

/* Make the resolver function decl to dispatch the versions of
   a multi-versioned function, DEFAULT_DECL.  Create an
   empty basic block in the resolver and store the pointer in
   EMPTY_BB.  Return the decl of the resolver function.  */

static tree
make_resolver_func (const tree default_decl,
		    const tree dispatch_decl,
		    basic_block *empty_bb)
{
  char *resolver_name;
  tree decl, type, decl_name, t;
  bool is_uniq = false;

  /* IFUNC's have to be globally visible.  So, if the default_decl is
     not, then the name of the IFUNC should be made unique.  */
  if (TREE_PUBLIC (default_decl) == 0)
    is_uniq = true;

  /* Append the filename to the resolver function if the versions are
     not externally visible.  This is because the resolver function has
     to be externally visible for the loader to find it.  So, appending
     the filename will prevent conflicts with a resolver function from
     another module which is based on the same version name.  */
  resolver_name = make_name (default_decl, "resolver", is_uniq);

  /* The resolver function should return a (void *).  */
  type = build_function_type_list (ptr_type_node, NULL_TREE);

  decl = build_fn_decl (resolver_name, type);
  decl_name = get_identifier (resolver_name);
  SET_DECL_ASSEMBLER_NAME (decl, decl_name);

  DECL_NAME (decl) = decl_name;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  /* IFUNC resolvers have to be externally visible.  */
  TREE_PUBLIC (decl) = 1;
  DECL_UNINLINABLE (decl) = 0;

  /* Resolver is not external, body is generated.  */
  DECL_EXTERNAL (decl) = 0;
  DECL_EXTERNAL (dispatch_decl) = 0;

  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  DECL_STATIC_CONSTRUCTOR (decl) = 0;

  if (DECL_COMDAT_GROUP (default_decl)
      || TREE_PUBLIC (default_decl))
    {
      /* In this case, each translation unit with a call to this
	 versioned function will put out a resolver.  Ensure it
	 is comdat to keep just one copy.  */
      DECL_COMDAT (decl) = 1;
      make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
    }
  /* Build result decl and add to function_decl.  */
  t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  gimplify_function_tree (decl);
  push_cfun (DECL_STRUCT_FUNCTION (decl));
  *empty_bb = init_lowered_empty_function (decl, false);

  cgraph_add_new_function (decl, true);
  cgraph_call_function_insertion_hooks (cgraph_get_create_node (decl));

  pop_cfun ();

  gcc_assert (dispatch_decl != NULL);
  /* Mark dispatch_decl as "ifunc" with resolver as resolver_name.  */
  DECL_ATTRIBUTES (dispatch_decl)
    = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));

  /* Create the alias for dispatch to resolver here.  */
  /*cgraph_create_function_alias (dispatch_decl, decl);*/
  cgraph_same_body_alias (NULL, dispatch_decl, decl);
  XDELETEVEC (resolver_name);
  return decl;
}
/* Generate the dispatching code body to dispatch multi-versioned function
   DECL.  The target hook is called to process the "target" attributes and
   provide the code to dispatch the right function at run-time.  NODE points
   to the dispatcher decl whose body will be created.  */

static tree
ix86_generate_version_dispatcher_body (void *node_p)
{
  tree resolver_decl;
  basic_block empty_bb;
  vec<tree> fn_ver_vec = vNULL;
  tree default_ver_decl;
  struct cgraph_node *versn;
  struct cgraph_node *node;

  struct cgraph_function_version_info *node_version_info = NULL;
  struct cgraph_function_version_info *versn_info = NULL;

  node = (cgraph_node *)node_p;

  node_version_info = get_cgraph_node_version (node);
  gcc_assert (node->dispatcher_function
	      && node_version_info != NULL);

  if (node_version_info->dispatcher_resolver)
    return node_version_info->dispatcher_resolver;

  /* The first version in the chain corresponds to the default version.  */
  default_ver_decl = node_version_info->next->this_node->symbol.decl;

  /* node is going to be an alias, so remove the finalized bit.  */
  node->local.finalized = false;

  resolver_decl = make_resolver_func (default_ver_decl,
				      node->symbol.decl, &empty_bb);

  node_version_info->dispatcher_resolver = resolver_decl;

  push_cfun (DECL_STRUCT_FUNCTION (resolver_decl));

  fn_ver_vec.create (2);

  for (versn_info = node_version_info->next; versn_info;
       versn_info = versn_info->next)
    {
      versn = versn_info->this_node;
      /* Check for virtual functions here again, as by this time it should
	 have been determined if this function needs a vtable index or
	 not.  This happens for methods in derived classes that override
	 virtual methods in base classes but are not explicitly marked as
	 virtual.  */
      if (DECL_VINDEX (versn->symbol.decl))
	error_at (DECL_SOURCE_LOCATION (versn->symbol.decl),
		  "virtual function multiversioning not supported");

      fn_ver_vec.safe_push (versn->symbol.decl);
    }

  dispatch_function_versions (resolver_decl, &fn_ver_vec, &empty_bb);
  fn_ver_vec.release ();
  rebuild_cgraph_edges ();
  pop_cfun ();
  return resolver_decl;
}
/* This builds the processor_model struct type defined in
   libgcc/config/i386/cpuinfo.c.  */

static tree
build_processor_model_struct (void)
{
  const char *field_name[] = {"__cpu_vendor", "__cpu_type", "__cpu_subtype",
			      "__cpu_features"};
  tree field = NULL_TREE, field_chain = NULL_TREE;
  unsigned int i;
  tree type = make_node (RECORD_TYPE);

  /* The first 3 fields are unsigned int.  */
  for (i = 0; i < 3; ++i)
    {
      field = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
			  get_identifier (field_name[i]), unsigned_type_node);
      if (field_chain != NULL_TREE)
	DECL_CHAIN (field) = field_chain;
      field_chain = field;
    }

  /* The last field is an array of unsigned integers of size one.  */
  field = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
		      get_identifier (field_name[3]),
		      build_array_type (unsigned_type_node,
					build_index_type (size_one_node)));
  if (field_chain != NULL_TREE)
    DECL_CHAIN (field) = field_chain;
  field_chain = field;

  finish_builtin_struct (type, "__processor_model", field_chain, NULL_TREE);
  return type;
}
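/* For illustration: the layout built above mirrors this struct from
   libgcc/config/i386/cpuinfo.c (reproduced here as a sketch):

     struct __processor_model
     {
       unsigned int __cpu_vendor;
       unsigned int __cpu_type;
       unsigned int __cpu_subtype;
       unsigned int __cpu_features[1];
     };
*/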
/* Returns an extern, comdat VAR_DECL of type TYPE and name NAME.  */

static tree
make_var_decl (tree type, const char *name)
{
  tree new_decl;

  new_decl = build_decl (UNKNOWN_LOCATION,
			 VAR_DECL,
			 get_identifier(name),
			 type);

  DECL_EXTERNAL (new_decl) = 1;
  TREE_STATIC (new_decl) = 1;
  TREE_PUBLIC (new_decl) = 1;
  DECL_INITIAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 0;
  DECL_PRESERVE_P (new_decl) = 1;

  make_decl_one_only (new_decl, DECL_ASSEMBLER_NAME (new_decl));
  assemble_variable (new_decl, 0, 0, 0);

  return new_decl;
}
/* FNDECL is a __builtin_cpu_is or a __builtin_cpu_supports call that is folded
   into an integer defined in libgcc/config/i386/cpuinfo.c.  */

static tree
fold_builtin_cpu (tree fndecl, tree *args)
{
  unsigned int i;
  enum ix86_builtins fn_code = (enum ix86_builtins)
				DECL_FUNCTION_CODE (fndecl);
  tree param_string_cst = NULL;

  /* This is the order of bit-fields in __processor_features in cpuinfo.c.  */
  enum processor_features
  {
    F_CMOV = 0,
    F_MMX,
    F_POPCNT,
    F_SSE,
    F_SSE2,
    F_SSE3,
    F_SSSE3,
    F_SSE4_1,
    F_SSE4_2,
    F_AVX,
    F_AVX2,
    F_MAX
  };

  /* These are the values for vendor types and cpu types and subtypes
     in cpuinfo.c.  Cpu types and subtypes should be subtracted by
     the corresponding start value.  */
  enum processor_model
  {
    M_INTEL = 1,
    M_AMD,
    M_CPU_TYPE_START,
    M_INTEL_ATOM,
    M_INTEL_CORE2,
    M_INTEL_COREI7,
    M_AMDFAM10H,
    M_AMDFAM15H,
    M_CPU_SUBTYPE_START,
    M_INTEL_COREI7_NEHALEM,
    M_INTEL_COREI7_WESTMERE,
    M_INTEL_COREI7_SANDYBRIDGE,
    M_AMDFAM10H_BARCELONA,
    M_AMDFAM10H_SHANGHAI,
    M_AMDFAM10H_ISTANBUL,
    M_AMDFAM15H_BDVER1,
    M_AMDFAM15H_BDVER2,
    M_AMDFAM15H_BDVER3
  };

  static struct _arch_names_table
    {
      const char *const name;
      const enum processor_model model;
    }
  const arch_names_table[] =
    {
      {"amd", M_AMD},
      {"intel", M_INTEL},
      {"atom", M_INTEL_ATOM},
      {"core2", M_INTEL_CORE2},
      {"corei7", M_INTEL_COREI7},
      {"nehalem", M_INTEL_COREI7_NEHALEM},
      {"westmere", M_INTEL_COREI7_WESTMERE},
      {"sandybridge", M_INTEL_COREI7_SANDYBRIDGE},
      {"amdfam10h", M_AMDFAM10H},
      {"barcelona", M_AMDFAM10H_BARCELONA},
      {"shanghai", M_AMDFAM10H_SHANGHAI},
      {"istanbul", M_AMDFAM10H_ISTANBUL},
      {"amdfam15h", M_AMDFAM15H},
      {"bdver1", M_AMDFAM15H_BDVER1},
      {"bdver2", M_AMDFAM15H_BDVER2},
      {"bdver3", M_AMDFAM15H_BDVER3},
    };

  static struct _isa_names_table
    {
      const char *const name;
      const enum processor_features feature;
    }
  const isa_names_table[] =
    {
      {"cmov",   F_CMOV},
      {"mmx",    F_MMX},
      {"popcnt", F_POPCNT},
      {"sse",    F_SSE},
      {"sse2",   F_SSE2},
      {"sse3",   F_SSE3},
      {"ssse3",  F_SSSE3},
      {"sse4.1", F_SSE4_1},
      {"sse4.2", F_SSE4_2},
      {"avx",    F_AVX},
      {"avx2",   F_AVX2}
    };

  tree __processor_model_type = build_processor_model_struct ();
  tree __cpu_model_var = make_var_decl (__processor_model_type,
					"__cpu_model");

  gcc_assert ((args != NULL) && (*args != NULL));

  param_string_cst = *args;
  while (param_string_cst
	 && TREE_CODE (param_string_cst) != STRING_CST)
    {
      /* *args must be an expr that can contain other EXPRS leading to a
	 STRING_CST.  */
      if (!EXPR_P (param_string_cst))
	{
	  error ("parameter to builtin must be a string constant or literal");
	  return integer_zero_node;
	}
      param_string_cst = TREE_OPERAND (EXPR_CHECK (param_string_cst), 0);
    }

  gcc_assert (param_string_cst);

  if (fn_code == IX86_BUILTIN_CPU_IS)
    {
      tree ref;
      tree field;
      tree final;

      unsigned int field_val = 0;
      unsigned int NUM_ARCH_NAMES
	= sizeof (arch_names_table) / sizeof (struct _arch_names_table);

      for (i = 0; i < NUM_ARCH_NAMES; i++)
	if (strcmp (arch_names_table[i].name,
		    TREE_STRING_POINTER (param_string_cst)) == 0)
	  break;

      if (i == NUM_ARCH_NAMES)
	{
	  error ("parameter to builtin not valid: %s",
		 TREE_STRING_POINTER (param_string_cst));
	  return integer_zero_node;
	}

      field = TYPE_FIELDS (__processor_model_type);
      field_val = arch_names_table[i].model;

      /* CPU types are stored in the next field.  */
      if (field_val > M_CPU_TYPE_START
	  && field_val < M_CPU_SUBTYPE_START)
	{
	  field = DECL_CHAIN (field);
	  field_val -= M_CPU_TYPE_START;
	}

      /* CPU subtypes are stored in the next field.  */
      if (field_val > M_CPU_SUBTYPE_START)
	{
	  field = DECL_CHAIN ( DECL_CHAIN (field));
	  field_val -= M_CPU_SUBTYPE_START;
	}

      /* Get the appropriate field in __cpu_model.  */
      ref = build3 (COMPONENT_REF, TREE_TYPE (field), __cpu_model_var,
		    field, NULL_TREE);

      /* Check the value.  */
      final = build2 (EQ_EXPR, unsigned_type_node, ref,
		      build_int_cstu (unsigned_type_node, field_val));
      return build1 (CONVERT_EXPR, integer_type_node, final);
    }
  else if (fn_code == IX86_BUILTIN_CPU_SUPPORTS)
    {
      tree ref;
      tree array_elt;
      tree field;
      tree final;

      unsigned int field_val = 0;
      unsigned int NUM_ISA_NAMES
	= sizeof (isa_names_table) / sizeof (struct _isa_names_table);

      for (i = 0; i < NUM_ISA_NAMES; i++)
	if (strcmp (isa_names_table[i].name,
		    TREE_STRING_POINTER (param_string_cst)) == 0)
	  break;

      if (i == NUM_ISA_NAMES)
	{
	  error ("parameter to builtin not valid: %s",
		 TREE_STRING_POINTER (param_string_cst));
	  return integer_zero_node;
	}

      field = TYPE_FIELDS (__processor_model_type);
      /* Get the last field, which is __cpu_features.  */
      while (DECL_CHAIN (field))
	field = DECL_CHAIN (field);

      /* Get the appropriate field: __cpu_model.__cpu_features.  */
      ref = build3 (COMPONENT_REF, TREE_TYPE (field), __cpu_model_var,
		    field, NULL_TREE);

      /* Access the 0th element of __cpu_features array.  */
      array_elt = build4 (ARRAY_REF, unsigned_type_node, ref,
			  integer_zero_node, NULL_TREE, NULL_TREE);

      field_val = (1 << isa_names_table[i].feature);
      /* Return __cpu_model.__cpu_features[0] & field_val.  */
      final = build2 (BIT_AND_EXPR, unsigned_type_node, array_elt,
		      build_int_cstu (unsigned_type_node, field_val));
      return build1 (CONVERT_EXPR, integer_type_node, final);
    }
  gcc_unreachable ();
}
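/* For illustration: given the tables above, a call like

     __builtin_cpu_supports ("sse4.2")

   folds to the equivalent of

     (int) (__cpu_model.__cpu_features[0] & (1 << F_SSE4_2))

   while

     __builtin_cpu_is ("corei7")

   folds to a comparison of __cpu_model.__cpu_type against
   M_INTEL_COREI7 - M_CPU_TYPE_START, per the field/start-value
   adjustment done in the CPU_IS branch.  */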
static tree
ix86_fold_builtin (tree fndecl, int n_args,
		   tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum ix86_builtins fn_code = (enum ix86_builtins)
				   DECL_FUNCTION_CODE (fndecl);
      if (fn_code == IX86_BUILTIN_CPU_IS
	  || fn_code == IX86_BUILTIN_CPU_SUPPORTS)
	{
	  gcc_assert (n_args == 1);
	  return fold_builtin_cpu (fndecl, args);
	}
    }

#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#endif

  return NULL_TREE;
}
/* Make builtins to detect cpu type and features supported.  NAME is
   the builtin name, CODE is the builtin code, and FTYPE is the function
   type of the builtin.  */

static void
make_cpu_type_builtin (const char* name, int code,
		       enum ix86_builtin_func_type ftype, bool is_const)
{
  tree decl;
  tree type;

  type = ix86_get_builtin_func_type (ftype);
  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
			       NULL, NULL_TREE);
  gcc_assert (decl != NULL_TREE);
  ix86_builtins[(int) code] = decl;
  TREE_READONLY (decl) = is_const;
}
/* Make builtins to get CPU type and features supported.  The created
   builtins are :

   __builtin_cpu_init (), to detect cpu type and features,
   __builtin_cpu_is ("<CPUNAME>"), to check if cpu is of type <CPUNAME>,
   __builtin_cpu_supports ("<FEATURE>"), to check if cpu supports <FEATURE>
   */

static void
ix86_init_platform_type_builtins (void)
{
  make_cpu_type_builtin ("__builtin_cpu_init", IX86_BUILTIN_CPU_INIT,
			 INT_FTYPE_VOID, false);
  make_cpu_type_builtin ("__builtin_cpu_is", IX86_BUILTIN_CPU_IS,
			 INT_FTYPE_PCCHAR, true);
  make_cpu_type_builtin ("__builtin_cpu_supports", IX86_BUILTIN_CPU_SUPPORTS,
			 INT_FTYPE_PCCHAR, true);
}
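/* For illustration only (a sketch of user code, not part of GCC):

     int
     pick_impl (void)
     {
       __builtin_cpu_init ();
       if (__builtin_cpu_is ("corei7"))
	 return 1;
       if (__builtin_cpu_supports ("sse4.2"))
	 return 2;
       return 0;
     }

   __builtin_cpu_init expands to a call to __cpu_indicator_init in
   libgcc, which populates the __cpu_model variable read by the folded
   forms of the other two builtins.  */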
/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
				      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
			      NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
			      sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
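/* For illustration only (a sketch of user code, not part of GCC): these
   builtins let code compiled for one 64-bit ABI handle the varargs of a
   function declared with the other ABI's attribute, e.g.

     int __attribute__ ((ms_abi))
     sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, s = 0;
       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
	 s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/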
static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  lang_hooks.types.register_builtin_type (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  lang_hooks.types.register_builtin_type (float128_type_node, "__float128");

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}
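/* For illustration: once these types are registered, user code can use
   them directly, e.g. (a sketch)

     __float128 q = 1.0Q;   -- TFmode constant, 'q'/'Q' suffix
     __float80  w = 1.0W;   -- XFmode constant, 'w'/'W' suffix
*/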
static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* Builtins to get CPU type and features. */
  ix86_init_platform_type_builtins ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* We will expand them to normal call if SSE isn't available since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
			    BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
			    BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;

  ix86_init_tm_builtins ();
  ix86_init_mmx_sse_builtins ();

  if (TARGET_LP64)
    ix86_init_builtins_va_builtins_abi ();

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}
/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!insn_data[icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);

  return target;
}
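/* For illustration: a two-operand builtin such as __builtin_ia32_paddw128
   reaches this routine (via ix86_expand_args_builtin, whose comparison is
   UNKNOWN for it), and conceptually ends up as

     pat = GEN_FCN (CODE_FOR_addv8hi3) (target, op0, op1);

   i.e. the named pattern's generator is called with the expanded,
   predicate-checked operands.  The specific icode shown is only an
   example.  */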
/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
			       enum ix86_builtin_func_type m_type,
			       enum rtx_code sub_code)
{
  rtx pat;
  unsigned int i;
  int nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs - 1)
	{
	  if (!insn_data[icode].operand[i + 1].predicate (op, mode))
	    {
	      enum insn_code new_icode = icode;
	      switch (icode)
		{
		case CODE_FOR_xop_vpermil2v2df3:
		case CODE_FOR_xop_vpermil2v4sf3:
		case CODE_FOR_xop_vpermil2v4df3:
		case CODE_FOR_xop_vpermil2v8sf3:
		  error ("the last argument must be a 2-bit immediate");
		  return gen_reg_rtx (tmode);
		case CODE_FOR_xop_rotlv2di3:
		  new_icode = CODE_FOR_rotlv2di3;
		  goto xop_rotl;
		case CODE_FOR_xop_rotlv4si3:
		  new_icode = CODE_FOR_rotlv4si3;
		  goto xop_rotl;
		case CODE_FOR_xop_rotlv8hi3:
		  new_icode = CODE_FOR_rotlv8hi3;
		  goto xop_rotl;
		case CODE_FOR_xop_rotlv16qi3:
		  new_icode = CODE_FOR_rotlv16qi3;
		xop_rotl:
		  if (CONST_INT_P (op))
		    {
		      int mask = GET_MODE_BITSIZE (GET_MODE_INNER (tmode)) - 1;
		      op = GEN_INT (INTVAL (op) & mask);
		      gcc_checking_assert
			(insn_data[icode].operand[i + 1].predicate (op, mode));
		    }
		  else
		    {
		      gcc_checking_assert
			(nargs == 2
			 && insn_data[new_icode].operand[0].mode == tmode
			 && insn_data[new_icode].operand[1].mode == tmode
			 && insn_data[new_icode].operand[2].mode == mode
			 && insn_data[new_icode].operand[0].predicate
			    == insn_data[icode].operand[0].predicate
			 && insn_data[new_icode].operand[1].predicate
			    == insn_data[icode].operand[1].predicate);
		      icode = new_icode;
		    }
		  break;

		default:
		  gcc_unreachable ();
		}
	    }
	}
      else
	{
	  if (VECTOR_MODE_P (mode))
	    op = safe_vector_operand (op, mode);

	  /* If we aren't optimizing, only allow one memory operand to be
	     generated.  */
	  if (memory_operand (op, mode))
	    num_memory++;

	  gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

	  if (optimize
	      || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
	      || num_memory > 1)
	    op = force_reg (mode, op);
	}

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			       GEN_INT ((int)sub_code));
      else if (! comparison_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
	{
	  rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
				       args[0].op,
				       args[1].op);

	  pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
	}
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			     args[2].op, args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
				    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (!insn_data[icode].operand[2].predicate (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comparison insns.  */

static rtx
ix86_expand_sse_compare (const struct builtin_description *d,
			 tree exp, rtx target, bool swap)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (swap)
    {
      rtx tmp = gen_reg_rtx (mode1);
      emit_move_insn (tmp, op1);
      op1 = op0;
      op0 = tmp;
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comi insns.  */

static rtx
ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
		      rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
    {
      rtx tmp = op1;
      op1 = op0;
      op0 = tmp;
    }

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
			  gen_rtx_STRICT_LOW_PART (VOIDmode, target),
			  gen_rtx_fmt_ee (comparison, QImode,
					  SET_DEST (pat),
					  const0_rtx)));

  return SUBREG_REG (target);
}
/* Subroutines of ix86_expand_args_builtin to take care of round insns.  */

static rtx
ix86_expand_sse_round (const struct builtin_description *d, tree exp,
		       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;

  if (optimize || target == 0
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = GEN_INT (d->comparison);

  pat = GEN_FCN (d->icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
static rtx
ix86_expand_sse_round_vec_pack_sfix (const struct builtin_description *d,
				     tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;

  if (optimize || target == 0
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  op0 = safe_vector_operand (op0, mode0);
  op1 = safe_vector_operand (op1, mode1);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = GEN_INT (d->comparison);

  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of ptest insns.  */

static rtx
ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
		       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
			  gen_rtx_STRICT_LOW_PART (VOIDmode, target),
			  gen_rtx_fmt_ee (comparison, QImode,
					  gen_rtx_REG (CCmode, FLAGS_REG),
					  const0_rtx)));

  return SUBREG_REG (target);
}
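/* For illustration: smmintrin.h maps the SSE4.1 test intrinsics onto the
   builtins expanded here, e.g. (a sketch of the real header, which may
   differ in attributes and detail):

     extern __inline int
     _mm_testz_si128 (__m128i __M, __m128i __V)
     {
       return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
     }
*/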
/* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns.  */

static rtx
ix86_expand_sse_pcmpestr (const struct builtin_description *d,
			  tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  tree arg4 = CALL_EXPR_ARG (exp, 4);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  rtx op4 = expand_normal (arg4);
  enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modei3 = insn_data[d->icode].operand[3].mode;
  modev4 = insn_data[d->icode].operand[4].mode;
  modei5 = insn_data[d->icode].operand[5].mode;
  modeimm = insn_data[d->icode].operand[6].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev4))
    op2 = safe_vector_operand (op2, modev4);

  if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
    op1 = copy_to_mode_reg (modei3, op1);
  if ((optimize && !register_operand (op2, modev4))
      || !insn_data[d->icode].operand[4].predicate (op2, modev4))
    op2 = copy_to_mode_reg (modev4, op2);
  if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
    op3 = copy_to_mode_reg (modei5, op3);

  if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
    {
      error ("the fifth argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPESTRI128)
    {
      if (optimize || !target
	  || GET_MODE (target) != tmode0
	  || !insn_data[d->icode].operand[0].predicate (target, tmode0))
	target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
    }
  else if (d->code == IX86_BUILTIN_PCMPESTRM128)
    {
      if (optimize || !target
	  || GET_MODE (target) != tmode1
	  || !insn_data[d->icode].operand[1].predicate (target, tmode1))
	target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
	(gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
		      gen_rtx_fmt_ee (EQ, QImode,
				      gen_rtx_REG ((enum machine_mode) d->flag,
						   FLAGS_REG),
				      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
30643 ix86_expand_sse_pcmpistr (const struct builtin_description
*d
,
30644 tree exp
, rtx target
)
30647 tree arg0
= CALL_EXPR_ARG (exp
, 0);
30648 tree arg1
= CALL_EXPR_ARG (exp
, 1);
30649 tree arg2
= CALL_EXPR_ARG (exp
, 2);
30650 rtx scratch0
, scratch1
;
30651 rtx op0
= expand_normal (arg0
);
30652 rtx op1
= expand_normal (arg1
);
30653 rtx op2
= expand_normal (arg2
);
30654 enum machine_mode tmode0
, tmode1
, modev2
, modev3
, modeimm
;
30656 tmode0
= insn_data
[d
->icode
].operand
[0].mode
;
30657 tmode1
= insn_data
[d
->icode
].operand
[1].mode
;
30658 modev2
= insn_data
[d
->icode
].operand
[2].mode
;
30659 modev3
= insn_data
[d
->icode
].operand
[3].mode
;
30660 modeimm
= insn_data
[d
->icode
].operand
[4].mode
;
30662 if (VECTOR_MODE_P (modev2
))
30663 op0
= safe_vector_operand (op0
, modev2
);
30664 if (VECTOR_MODE_P (modev3
))
30665 op1
= safe_vector_operand (op1
, modev3
);
30667 if (!insn_data
[d
->icode
].operand
[2].predicate (op0
, modev2
))
30668 op0
= copy_to_mode_reg (modev2
, op0
);
30669 if ((optimize
&& !register_operand (op1
, modev3
))
30670 || !insn_data
[d
->icode
].operand
[3].predicate (op1
, modev3
))
30671 op1
= copy_to_mode_reg (modev3
, op1
);
30673 if (!insn_data
[d
->icode
].operand
[4].predicate (op2
, modeimm
))
30675 error ("the third argument must be an 8-bit immediate");
30679 if (d
->code
== IX86_BUILTIN_PCMPISTRI128
)
30681 if (optimize
|| !target
30682 || GET_MODE (target
) != tmode0
30683 || !insn_data
[d
->icode
].operand
[0].predicate (target
, tmode0
))
30684 target
= gen_reg_rtx (tmode0
);
30686 scratch1
= gen_reg_rtx (tmode1
);
30688 pat
= GEN_FCN (d
->icode
) (target
, scratch1
, op0
, op1
, op2
);
30690 else if (d
->code
== IX86_BUILTIN_PCMPISTRM128
)
30692 if (optimize
|| !target
30693 || GET_MODE (target
) != tmode1
30694 || !insn_data
[d
->icode
].operand
[1].predicate (target
, tmode1
))
30695 target
= gen_reg_rtx (tmode1
);
30697 scratch0
= gen_reg_rtx (tmode0
);
30699 pat
= GEN_FCN (d
->icode
) (scratch0
, target
, op0
, op1
, op2
);
30703 gcc_assert (d
->flag
);
30705 scratch0
= gen_reg_rtx (tmode0
);
30706 scratch1
= gen_reg_rtx (tmode1
);
30708 pat
= GEN_FCN (d
->icode
) (scratch0
, scratch1
, op0
, op1
, op2
);
30718 target
= gen_reg_rtx (SImode
);
30719 emit_move_insn (target
, const0_rtx
);
30720 target
= gen_rtx_SUBREG (QImode
, target
, 0);
30723 (gen_rtx_SET (VOIDmode
, gen_rtx_STRICT_LOW_PART (VOIDmode
, target
),
30724 gen_rtx_fmt_ee (EQ
, QImode
,
30725 gen_rtx_REG ((enum machine_mode
) d
->flag
,
30728 return SUBREG_REG (target
);
/* Subroutine of ix86_expand_builtin to take care of insns with
   variable number of operands.  */

static rtx
ix86_expand_args_builtin (const struct builtin_description *d,
			  tree exp, rtx target)
{
  rtx pat, real_target;
  unsigned int i, nargs;
  unsigned int nargs_constant = 0;
  int num_memory = 0;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[4];
  bool last_arg_count = false;
  enum insn_code icode = d->icode;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum machine_mode rmode = VOIDmode;
  bool swap = false;
  enum rtx_code comparison = d->comparison;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case V2DF_FTYPE_V2DF_ROUND:
    case V4DF_FTYPE_V4DF_ROUND:
    case V4SF_FTYPE_V4SF_ROUND:
    case V8SF_FTYPE_V8SF_ROUND:
    case V4SI_FTYPE_V4SF_ROUND:
    case V8SI_FTYPE_V8SF_ROUND:
      return ix86_expand_sse_round (d, exp, target);
    case V4SI_FTYPE_V2DF_V2DF_ROUND:
    case V8SI_FTYPE_V4DF_V4DF_ROUND:
      return ix86_expand_sse_round_vec_pack_sfix (d, exp, target);
    case INT_FTYPE_V8SF_V8SF_PTEST:
    case INT_FTYPE_V4DI_V4DI_PTEST:
    case INT_FTYPE_V4DF_V4DF_PTEST:
    case INT_FTYPE_V4SF_V4SF_PTEST:
    case INT_FTYPE_V2DI_V2DI_PTEST:
    case INT_FTYPE_V2DF_V2DF_PTEST:
      return ix86_expand_sse_ptest (d, exp, target);
    case FLOAT128_FTYPE_FLOAT128:
    case FLOAT_FTYPE_FLOAT:
    case INT_FTYPE_INT:
    case UINT64_FTYPE_INT:
    case UINT16_FTYPE_UINT16:
    case INT64_FTYPE_INT64:
    case INT64_FTYPE_V4SF:
    case INT64_FTYPE_V2DF:
    case INT_FTYPE_V16QI:
    case INT_FTYPE_V8QI:
    case INT_FTYPE_V8SF:
    case INT_FTYPE_V4DF:
    case INT_FTYPE_V4SF:
    case INT_FTYPE_V2DF:
    case INT_FTYPE_V32QI:
    case V16QI_FTYPE_V16QI:
    case V8SI_FTYPE_V8SF:
    case V8SI_FTYPE_V4SI:
    case V8HI_FTYPE_V8HI:
    case V8HI_FTYPE_V16QI:
    case V8QI_FTYPE_V8QI:
    case V8SF_FTYPE_V8SF:
    case V8SF_FTYPE_V8SI:
    case V8SF_FTYPE_V4SF:
    case V8SF_FTYPE_V8HI:
    case V4SI_FTYPE_V4SI:
    case V4SI_FTYPE_V16QI:
    case V4SI_FTYPE_V4SF:
    case V4SI_FTYPE_V8SI:
    case V4SI_FTYPE_V8HI:
    case V4SI_FTYPE_V4DF:
    case V4SI_FTYPE_V2DF:
    case V4HI_FTYPE_V4HI:
    case V4DF_FTYPE_V4DF:
    case V4DF_FTYPE_V4SI:
    case V4DF_FTYPE_V4SF:
    case V4DF_FTYPE_V2DF:
    case V4SF_FTYPE_V4SF:
    case V4SF_FTYPE_V4SI:
    case V4SF_FTYPE_V8SF:
    case V4SF_FTYPE_V4DF:
    case V4SF_FTYPE_V8HI:
    case V4SF_FTYPE_V2DF:
    case V2DI_FTYPE_V2DI:
    case V2DI_FTYPE_V16QI:
    case V2DI_FTYPE_V8HI:
    case V2DI_FTYPE_V4SI:
    case V2DF_FTYPE_V2DF:
    case V2DF_FTYPE_V4SI:
    case V2DF_FTYPE_V4DF:
    case V2DF_FTYPE_V4SF:
    case V2DF_FTYPE_V2SI:
    case V2SI_FTYPE_V2SI:
    case V2SI_FTYPE_V4SF:
    case V2SI_FTYPE_V2SF:
    case V2SI_FTYPE_V2DF:
    case V2SF_FTYPE_V2SF:
    case V2SF_FTYPE_V2SI:
    case V32QI_FTYPE_V32QI:
    case V32QI_FTYPE_V16QI:
    case V16HI_FTYPE_V16HI:
    case V16HI_FTYPE_V8HI:
    case V8SI_FTYPE_V8SI:
    case V16HI_FTYPE_V16QI:
    case V8SI_FTYPE_V16QI:
    case V4DI_FTYPE_V16QI:
    case V8SI_FTYPE_V8HI:
    case V4DI_FTYPE_V8HI:
    case V4DI_FTYPE_V4SI:
    case V4DI_FTYPE_V2DI:
      nargs = 1;
      break;
    case V4SF_FTYPE_V4SF_VEC_MERGE:
    case V2DF_FTYPE_V2DF_VEC_MERGE:
      return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
    case FLOAT128_FTYPE_FLOAT128_FLOAT128:
    case V16QI_FTYPE_V16QI_V16QI:
    case V16QI_FTYPE_V8HI_V8HI:
    case V8QI_FTYPE_V8QI_V8QI:
    case V8QI_FTYPE_V4HI_V4HI:
    case V8HI_FTYPE_V8HI_V8HI:
    case V8HI_FTYPE_V16QI_V16QI:
    case V8HI_FTYPE_V4SI_V4SI:
    case V8SF_FTYPE_V8SF_V8SF:
    case V8SF_FTYPE_V8SF_V8SI:
    case V4SI_FTYPE_V4SI_V4SI:
    case V4SI_FTYPE_V8HI_V8HI:
    case V4SI_FTYPE_V4SF_V4SF:
    case V4SI_FTYPE_V2DF_V2DF:
    case V4HI_FTYPE_V4HI_V4HI:
    case V4HI_FTYPE_V8QI_V8QI:
    case V4HI_FTYPE_V2SI_V2SI:
    case V4DF_FTYPE_V4DF_V4DF:
    case V4DF_FTYPE_V4DF_V4DI:
    case V4SF_FTYPE_V4SF_V4SF:
    case V4SF_FTYPE_V4SF_V4SI:
    case V4SF_FTYPE_V4SF_V2SI:
    case V4SF_FTYPE_V4SF_V2DF:
    case V4SF_FTYPE_V4SF_DI:
    case V4SF_FTYPE_V4SF_SI:
    case V2DI_FTYPE_V2DI_V2DI:
    case V2DI_FTYPE_V16QI_V16QI:
    case V2DI_FTYPE_V4SI_V4SI:
    case V2UDI_FTYPE_V4USI_V4USI:
    case V2DI_FTYPE_V2DI_V16QI:
    case V2DI_FTYPE_V2DF_V2DF:
    case V2SI_FTYPE_V2SI_V2SI:
    case V2SI_FTYPE_V4HI_V4HI:
    case V2SI_FTYPE_V2SF_V2SF:
    case V2DF_FTYPE_V2DF_V2DF:
    case V2DF_FTYPE_V2DF_V4SF:
    case V2DF_FTYPE_V2DF_V2DI:
    case V2DF_FTYPE_V2DF_DI:
    case V2DF_FTYPE_V2DF_SI:
    case V2SF_FTYPE_V2SF_V2SF:
    case V1DI_FTYPE_V1DI_V1DI:
    case V1DI_FTYPE_V8QI_V8QI:
    case V1DI_FTYPE_V2SI_V2SI:
    case V32QI_FTYPE_V16HI_V16HI:
    case V16HI_FTYPE_V8SI_V8SI:
    case V32QI_FTYPE_V32QI_V32QI:
    case V16HI_FTYPE_V32QI_V32QI:
    case V16HI_FTYPE_V16HI_V16HI:
    case V8SI_FTYPE_V4DF_V4DF:
    case V8SI_FTYPE_V8SI_V8SI:
    case V8SI_FTYPE_V16HI_V16HI:
    case V4DI_FTYPE_V4DI_V4DI:
    case V4DI_FTYPE_V8SI_V8SI:
    case V4UDI_FTYPE_V8USI_V8USI:
      if (comparison == UNKNOWN)
	return ix86_expand_binop_builtin (icode, exp, target);
      nargs = 2;
      break;
    case V4SF_FTYPE_V4SF_V4SF_SWAP:
    case V2DF_FTYPE_V2DF_V2DF_SWAP:
      gcc_assert (comparison != UNKNOWN);
      nargs = 2;
      swap = true;
      break;
    case V16HI_FTYPE_V16HI_V8HI_COUNT:
    case V16HI_FTYPE_V16HI_SI_COUNT:
    case V8SI_FTYPE_V8SI_V4SI_COUNT:
    case V8SI_FTYPE_V8SI_SI_COUNT:
    case V4DI_FTYPE_V4DI_V2DI_COUNT:
    case V4DI_FTYPE_V4DI_INT_COUNT:
    case V8HI_FTYPE_V8HI_V8HI_COUNT:
    case V8HI_FTYPE_V8HI_SI_COUNT:
    case V4SI_FTYPE_V4SI_V4SI_COUNT:
    case V4SI_FTYPE_V4SI_SI_COUNT:
    case V4HI_FTYPE_V4HI_V4HI_COUNT:
    case V4HI_FTYPE_V4HI_SI_COUNT:
    case V2DI_FTYPE_V2DI_V2DI_COUNT:
    case V2DI_FTYPE_V2DI_SI_COUNT:
    case V2SI_FTYPE_V2SI_V2SI_COUNT:
    case V2SI_FTYPE_V2SI_SI_COUNT:
    case V1DI_FTYPE_V1DI_V1DI_COUNT:
    case V1DI_FTYPE_V1DI_SI_COUNT:
      nargs = 2;
      last_arg_count = true;
      break;
    case UINT64_FTYPE_UINT64_UINT64:
    case UINT_FTYPE_UINT_UINT:
    case UINT_FTYPE_UINT_USHORT:
    case UINT_FTYPE_UINT_UCHAR:
    case UINT16_FTYPE_UINT16_INT:
    case UINT8_FTYPE_UINT8_INT:
      nargs = 2;
      break;
    case V2DI_FTYPE_V2DI_INT_CONVERT:
      nargs = 2;
      rmode = V1TImode;
      nargs_constant = 1;
      break;
    case V4DI_FTYPE_V4DI_INT_CONVERT:
      nargs = 2;
      rmode = V2TImode;
      nargs_constant = 1;
      break;
    case V8HI_FTYPE_V8HI_INT:
    case V8HI_FTYPE_V8SF_INT:
    case V8HI_FTYPE_V4SF_INT:
    case V8SF_FTYPE_V8SF_INT:
    case V4SI_FTYPE_V4SI_INT:
    case V4SI_FTYPE_V8SI_INT:
    case V4HI_FTYPE_V4HI_INT:
    case V4DF_FTYPE_V4DF_INT:
    case V4SF_FTYPE_V4SF_INT:
    case V4SF_FTYPE_V8SF_INT:
    case V2DI_FTYPE_V2DI_INT:
    case V2DF_FTYPE_V2DF_INT:
    case V2DF_FTYPE_V4DF_INT:
    case V16HI_FTYPE_V16HI_INT:
    case V8SI_FTYPE_V8SI_INT:
    case V4DI_FTYPE_V4DI_INT:
    case V2DI_FTYPE_V4DI_INT:
      nargs = 2;
      nargs_constant = 1;
      break;
    case V16QI_FTYPE_V16QI_V16QI_V16QI:
    case V8SF_FTYPE_V8SF_V8SF_V8SF:
    case V4DF_FTYPE_V4DF_V4DF_V4DF:
    case V4SF_FTYPE_V4SF_V4SF_V4SF:
    case V2DF_FTYPE_V2DF_V2DF_V2DF:
    case V32QI_FTYPE_V32QI_V32QI_V32QI:
      nargs = 3;
      break;
    case V32QI_FTYPE_V32QI_V32QI_INT:
    case V16HI_FTYPE_V16HI_V16HI_INT:
    case V16QI_FTYPE_V16QI_V16QI_INT:
    case V4DI_FTYPE_V4DI_V4DI_INT:
    case V8HI_FTYPE_V8HI_V8HI_INT:
    case V8SI_FTYPE_V8SI_V8SI_INT:
    case V8SI_FTYPE_V8SI_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_INT:
    case V8SF_FTYPE_V8SF_V4SF_INT:
    case V4SI_FTYPE_V4SI_V4SI_INT:
    case V4DF_FTYPE_V4DF_V4DF_INT:
    case V4DF_FTYPE_V4DF_V2DF_INT:
    case V4SF_FTYPE_V4SF_V4SF_INT:
    case V2DI_FTYPE_V2DI_V2DI_INT:
    case V4DI_FTYPE_V4DI_V2DI_INT:
    case V2DF_FTYPE_V2DF_V2DF_INT:
      nargs = 3;
      nargs_constant = 1;
      break;
    case V4DI_FTYPE_V4DI_V4DI_INT_CONVERT:
      nargs = 3;
      rmode = V2TImode;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
      nargs = 3;
      rmode = V1TImode;
      nargs_constant = 1;
      break;
    case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
      nargs = 3;
      rmode = DImode;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_UINT_UINT:
      nargs = 3;
      nargs_constant = 2;
      break;
    case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
    case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
    case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
      nargs = 4;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
      nargs = 4;
      nargs_constant = 2;
      break;
    case UCHAR_FTYPE_UCHAR_UINT_UINT_PUNSIGNED:
    case UCHAR_FTYPE_UCHAR_ULONGLONG_ULONGLONG_PULONGLONG:
      nargs = 4;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (comparison != UNKNOWN)
    {
      gcc_assert (nargs == 2);
      return ix86_expand_sse_compare (d, exp, target, swap);
    }

  if (rmode == VOIDmode || rmode == tmode)
    {
      if (optimize
	  || target == 0
	  || GET_MODE (target) != tmode
	  || !insn_p->operand[0].predicate (target, tmode))
	target = gen_reg_rtx (tmode);
      real_target = target;
    }
  else
    {
      target = gen_reg_rtx (rmode);
      real_target = simplify_gen_subreg (tmode, target, rmode, 0);
    }

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match = insn_p->operand[i + 1].predicate (op, mode);

      if (last_arg_count && (i + 1) == nargs)
	{
	  /* SIMD shift insns take either an 8-bit immediate or
	     register as count.  But builtin functions take int as
	     count.  If count doesn't match, we put it in register.  */
	  if (!match)
	    {
	      op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
	      if (!insn_p->operand[i + 1].predicate (op, mode))
		op = copy_to_reg (op);
	    }
	}
      else if ((nargs - i) <= nargs_constant)
	{
	  if (!match)
	    switch (icode)
	      {
	      case CODE_FOR_avx2_inserti128:
	      case CODE_FOR_avx2_extracti128:
		error ("the last argument must be a 1-bit immediate");
		return const0_rtx;

	      case CODE_FOR_sse4_1_roundsd:
	      case CODE_FOR_sse4_1_roundss:

	      case CODE_FOR_sse4_1_roundpd:
	      case CODE_FOR_sse4_1_roundps:
	      case CODE_FOR_avx_roundpd256:
	      case CODE_FOR_avx_roundps256:

	      case CODE_FOR_sse4_1_roundpd_vec_pack_sfix:
	      case CODE_FOR_sse4_1_roundps_sfix:
	      case CODE_FOR_avx_roundpd_vec_pack_sfix256:
	      case CODE_FOR_avx_roundps_sfix256:

	      case CODE_FOR_sse4_1_blendps:
	      case CODE_FOR_avx_blendpd256:
	      case CODE_FOR_avx_vpermilv4df:
		error ("the last argument must be a 4-bit immediate");
		return const0_rtx;

	      case CODE_FOR_sse4_1_blendpd:
	      case CODE_FOR_avx_vpermilv2df:
	      case CODE_FOR_xop_vpermil2v2df3:
	      case CODE_FOR_xop_vpermil2v4sf3:
	      case CODE_FOR_xop_vpermil2v4df3:
	      case CODE_FOR_xop_vpermil2v8sf3:
		error ("the last argument must be a 2-bit immediate");
		return const0_rtx;

	      case CODE_FOR_avx_vextractf128v4df:
	      case CODE_FOR_avx_vextractf128v8sf:
	      case CODE_FOR_avx_vextractf128v8si:
	      case CODE_FOR_avx_vinsertf128v4df:
	      case CODE_FOR_avx_vinsertf128v8sf:
	      case CODE_FOR_avx_vinsertf128v8si:
		error ("the last argument must be a 1-bit immediate");
		return const0_rtx;

	      case CODE_FOR_avx_vmcmpv2df3:
	      case CODE_FOR_avx_vmcmpv4sf3:
	      case CODE_FOR_avx_cmpv2df3:
	      case CODE_FOR_avx_cmpv4sf3:
	      case CODE_FOR_avx_cmpv4df3:
	      case CODE_FOR_avx_cmpv8sf3:
		error ("the last argument must be a 5-bit immediate");
		return const0_rtx;

	      default:
		switch (nargs_constant)
		  {
		  case 2:
		    if ((nargs - i) == nargs_constant)
		      {
			error ("the next to last argument must be an 8-bit immediate");
			break;
		      }
		  case 1:
		    error ("the last argument must be an 8-bit immediate");
		    break;
		  default:
		    gcc_unreachable ();
		  }
		return const0_rtx;
	      }
	}
      else
	{
	  if (VECTOR_MODE_P (mode))
	    op = safe_vector_operand (op, mode);

	  /* If we aren't optimizing, only allow one memory operand to
	     be generated.  */
	  if (memory_operand (op, mode))
	    num_memory++;

	  if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	    {
	      if (optimize || !match || num_memory > 1)
		op = copy_to_mode_reg (mode, op);
	    }
	  else
	    {
	      op = copy_to_reg (op);
	      op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
	    }
	}

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (real_target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
			     args[2].op);
      break;
    case 4:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
			     args[2].op, args[3].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of special insns
   with variable number of operands.  */

static rtx
ix86_expand_special_args_builtin (const struct builtin_description *d,
				  tree exp, rtx target)
{
  tree arg;
  rtx pat, op;
  unsigned int i, nargs, arg_adjust, memory;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[3];
  enum insn_code icode = d->icode;
  bool last_arg_constant = false;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum { load, store } klass;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case VOID_FTYPE_VOID:
      emit_insn (GEN_FCN (icode) (target));
      return 0;
    case VOID_FTYPE_UINT64:
    case VOID_FTYPE_UNSIGNED:
      nargs = 0;
      klass = store;
      memory = 0;
      break;

    case INT_FTYPE_VOID:
    case UINT64_FTYPE_VOID:
    case UNSIGNED_FTYPE_VOID:
      nargs = 0;
      klass = load;
      memory = 0;
      break;
    case UINT64_FTYPE_PUNSIGNED:
    case V2DI_FTYPE_PV2DI:
    case V4DI_FTYPE_PV4DI:
    case V32QI_FTYPE_PCCHAR:
    case V16QI_FTYPE_PCCHAR:
    case V8SF_FTYPE_PCV4SF:
    case V8SF_FTYPE_PCFLOAT:
    case V4SF_FTYPE_PCFLOAT:
    case V4DF_FTYPE_PCV2DF:
    case V4DF_FTYPE_PCDOUBLE:
    case V2DF_FTYPE_PCDOUBLE:
    case VOID_FTYPE_PVOID:
      nargs = 1;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV2SF_V4SF:
    case VOID_FTYPE_PV4DI_V4DI:
    case VOID_FTYPE_PV2DI_V2DI:
    case VOID_FTYPE_PCHAR_V32QI:
    case VOID_FTYPE_PCHAR_V16QI:
    case VOID_FTYPE_PFLOAT_V8SF:
    case VOID_FTYPE_PFLOAT_V4SF:
    case VOID_FTYPE_PDOUBLE_V4DF:
    case VOID_FTYPE_PDOUBLE_V2DF:
    case VOID_FTYPE_PLONGLONG_LONGLONG:
    case VOID_FTYPE_PULONGLONG_ULONGLONG:
    case VOID_FTYPE_PINT_INT:
      nargs = 1;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case V4SF_FTYPE_V4SF_PCV2SF:
    case V2DF_FTYPE_V2DF_PCDOUBLE:
      nargs = 2;
      klass = load;
      memory = 1;
      break;
    case V8SF_FTYPE_PCV8SF_V8SI:
    case V4DF_FTYPE_PCV4DF_V4DI:
    case V4SF_FTYPE_PCV4SF_V4SI:
    case V2DF_FTYPE_PCV2DF_V2DI:
    case V8SI_FTYPE_PCV8SI_V8SI:
    case V4DI_FTYPE_PCV4DI_V4DI:
    case V4SI_FTYPE_PCV4SI_V4SI:
    case V2DI_FTYPE_PCV2DI_V2DI:
      nargs = 2;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV8SF_V8SI_V8SF:
    case VOID_FTYPE_PV4DF_V4DI_V4DF:
    case VOID_FTYPE_PV4SF_V4SI_V4SF:
    case VOID_FTYPE_PV2DF_V2DI_V2DF:
    case VOID_FTYPE_PV8SI_V8SI_V8SI:
    case VOID_FTYPE_PV4DI_V4DI_V4DI:
    case VOID_FTYPE_PV4SI_V4SI_V4SI:
    case VOID_FTYPE_PV2DI_V2DI_V2DI:
      nargs = 2;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case VOID_FTYPE_UINT_UINT_UINT:
    case VOID_FTYPE_UINT64_UINT_UINT:
    case UCHAR_FTYPE_UINT_UINT_UINT:
    case UCHAR_FTYPE_UINT64_UINT_UINT:
      nargs = 3;
      klass = load;
      memory = ARRAY_SIZE (args);
      last_arg_constant = true;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (klass == store)
    {
      arg = CALL_EXPR_ARG (exp, 0);
      op = expand_normal (arg);
      gcc_assert (target == 0);
      if (memory)
	{
	  op = force_reg (Pmode, convert_to_mode (Pmode, op, 1));
	  target = gen_rtx_MEM (tmode, op);
	}
      else
	target = force_reg (tmode, op);
      arg_adjust = 1;
    }
  else
    {
      arg_adjust = 0;
      if (optimize
	  || target == 0
	  || !register_operand (target, tmode)
	  || GET_MODE (target) != tmode)
	target = gen_reg_rtx (tmode);
    }

  for (i = 0; i < nargs; i++)
    {
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match;

      arg = CALL_EXPR_ARG (exp, i + arg_adjust);
      op = expand_normal (arg);
      match = insn_p->operand[i + 1].predicate (op, mode);

      if (last_arg_constant && (i + 1) == nargs)
	{
	  if (!match)
	    {
	      if (icode == CODE_FOR_lwp_lwpvalsi3
		  || icode == CODE_FOR_lwp_lwpinssi3
		  || icode == CODE_FOR_lwp_lwpvaldi3
		  || icode == CODE_FOR_lwp_lwpinsdi3)
		error ("the last argument must be a 32-bit immediate");
	      else
		error ("the last argument must be an 8-bit immediate");
	      return const0_rtx;
	    }
	}
      else
	{
	  if (i == memory)
	    {
	      /* This must be the memory operand.  */
	      op = force_reg (Pmode, convert_to_mode (Pmode, op, 1));
	      op = gen_rtx_MEM (mode, op);
	      gcc_assert (GET_MODE (op) == mode
			  || GET_MODE (op) == VOIDmode);
	    }
	  else
	    {
	      /* This must be register.  */
	      if (VECTOR_MODE_P (mode))
		op = safe_vector_operand (op, mode);

	      gcc_assert (GET_MODE (op) == mode
			  || GET_MODE (op) == VOIDmode);
	      op = copy_to_mode_reg (mode, op);
	    }
	}

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;
  emit_insn (pat);
  return klass == store ? 0 : target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static unsigned int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_init.  We DO have language-level syntax for this, in
   the form of (type){ init-list }.  Except that since we can't place emms
   instructions from inside the compiler, we can't allow the use of MMX
   registers unless the user explicitly asks for it.  So we do *not* define
   vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md.  Instead
   we have builtins invoked by mmintrin.h that gives us license to emit
   these sorts of instructions.  */

static rtx
ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (call_expr_nargs (exp) == n_elt);

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
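/* For illustration: mmintrin.h wraps the vec_init builtins mentioned
   above, e.g. (a sketch of the real header, which may differ in
   attributes and detail):

     extern __inline __m64
     _mm_set_pi32 (int __i1, int __i0)
     {
       return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
     }
*/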
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_extract.  They would be redundant (for non-MMX) if we
   had a language-level syntax for referencing vector elements.  */

static rtx
ix86_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_extract (true, target, op0, elt);

  return target;
}
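/* For illustration: the intrinsic headers wrap the vec_ext builtins,
   e.g. (a sketch of the real header, which may differ in attributes
   and detail):

     extern __inline int
     _mm_extract_pi16 (__m64 const __A, int const __N)
     {
       return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
     }
*/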
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_set.  They would be redundant (for non-MMX) if we had
   a language-level syntax for referencing vector elements.  */

static rtx
ix86_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1, target;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  /* OP0 is the source of these builtin functions and shouldn't be
     modified.  Create a copy, use it and return it as target.  */
  target = gen_reg_rtx (tmode);
  emit_move_insn (target, op0);
  ix86_expand_vector_set (true, target, op1, elt);

  return target;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     enum machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2, arg3, arg4;
  rtx op0, op1, op2, op3, op4, pat, insn;
  enum machine_mode mode0, mode1, mode2, mode3, mode4;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  /* For CPU builtins that can be folded, fold first and expand the fold.  */
  switch (fcode)
    {
    case IX86_BUILTIN_CPU_INIT:
      {
	/* Make it call __cpu_indicator_init in libgcc.  */
	tree call_expr, fndecl, type;
	type = build_function_type_list (integer_type_node, NULL_TREE);
	fndecl = build_fn_decl ("__cpu_indicator_init", type);
	call_expr = build_call_expr (fndecl, 0);
	return expand_expr (call_expr, target, mode, EXPAND_NORMAL);
      }
    case IX86_BUILTIN_CPU_IS:
    case IX86_BUILTIN_CPU_SUPPORTS:
      {
	tree arg0 = CALL_EXPR_ARG (exp, 0);
	tree fold_expr = fold_builtin_cpu (fndecl, &arg0);
	gcc_assert (fold_expr != NULL_TREE);
	return expand_expr (fold_expr, target, mode, EXPAND_NORMAL);
      }
    }

  /* Determine whether the builtin function is available under the current ISA.
     Originally the builtin was not created if it wasn't applicable to the
     current ISA based on the command line switches.  With function specific
     options, we need to check in the context of the function making the call
     whether it is supported.  */
  if (ix86_builtins_isa[fcode].isa
      && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
    {
      char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
				       NULL, (enum fpmath_unit) 0, false);

      if (!opts)
	error ("%qE needs unknown isa option", fndecl);
      else
	{
	  gcc_assert (opts != NULL);
	  error ("%qE needs isa option %s", fndecl, opts);
	  free (opts);
	}
      return const0_rtx;
    }

  switch (fcode)
    {
    case IX86_BUILTIN_MASKMOVQ:
    case IX86_BUILTIN_MASKMOVDQU:
      icode = (fcode == IX86_BUILTIN_MASKMOVQ
	       ? CODE_FOR_mmx_maskmovq
	       : CODE_FOR_sse2_maskmovdqu);
      /* Note the arg order is different from the operand order.  */
      arg1 = CALL_EXPR_ARG (exp, 0);
      arg2 = CALL_EXPR_ARG (exp, 1);
      arg0 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      mode0 = insn_data[icode].operand[0].mode;
      mode1 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[2].mode;

      op0 = force_reg (Pmode, convert_to_mode (Pmode, op0, 1));
      op0 = gen_rtx_MEM (mode1, op0);

      if (!insn_data[icode].operand[0].predicate (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);
      if (!insn_data[icode].operand[1].predicate (op1, mode1))
	op1 = copy_to_mode_reg (mode1, op1);
      if (!insn_data[icode].operand[2].predicate (op2, mode2))
	op2 = copy_to_mode_reg (mode2, op2);
      pat = GEN_FCN (icode) (op0, op1, op2);
      if (!pat)
	return 0;
      emit_insn (pat);
      return 0;
    case IX86_BUILTIN_LDMXCSR:
      op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
      target = assign_386_stack_local (SImode, SLOT_TEMP);
      emit_move_insn (target, op0);
      emit_insn (gen_sse_ldmxcsr (target));
      return 0;

    case IX86_BUILTIN_STMXCSR:
      target = assign_386_stack_local (SImode, SLOT_TEMP);
      emit_insn (gen_sse_stmxcsr (target));
      return copy_to_mode_reg (SImode, target);

    case IX86_BUILTIN_CLFLUSH:
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      icode = CODE_FOR_sse2_clflush;
      if (!insn_data[icode].operand[0].predicate (op0, Pmode))
	op0 = force_reg (Pmode, convert_to_mode (Pmode, op0, 1));

      emit_insn (gen_sse2_clflush (op0));
      return 0;

    case IX86_BUILTIN_MONITOR:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      if (!REG_P (op0))
	op0 = force_reg (Pmode, convert_to_mode (Pmode, op0, 1));
      if (!REG_P (op1))
	op1 = copy_to_mode_reg (SImode, op1);
      if (!REG_P (op2))
	op2 = copy_to_mode_reg (SImode, op2);
      emit_insn (ix86_gen_monitor (op0, op1, op2));
      return 0;

    case IX86_BUILTIN_MWAIT:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      if (!REG_P (op0))
	op0 = copy_to_mode_reg (SImode, op0);
      if (!REG_P (op1))
	op1 = copy_to_mode_reg (SImode, op1);
      emit_insn (gen_sse3_mwait (op0, op1));
      return 0;
    case IX86_BUILTIN_VEC_INIT_V2SI:
    case IX86_BUILTIN_VEC_INIT_V4HI:
    case IX86_BUILTIN_VEC_INIT_V8QI:
      return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case IX86_BUILTIN_VEC_EXT_V2DF:
    case IX86_BUILTIN_VEC_EXT_V2DI:
    case IX86_BUILTIN_VEC_EXT_V4SF:
    case IX86_BUILTIN_VEC_EXT_V4SI:
    case IX86_BUILTIN_VEC_EXT_V8HI:
    case IX86_BUILTIN_VEC_EXT_V2SI:
    case IX86_BUILTIN_VEC_EXT_V4HI:
    case IX86_BUILTIN_VEC_EXT_V16QI:
      return ix86_expand_vec_ext_builtin (exp, target);

    case IX86_BUILTIN_VEC_SET_V2DI:
    case IX86_BUILTIN_VEC_SET_V4SF:
    case IX86_BUILTIN_VEC_SET_V4SI:
    case IX86_BUILTIN_VEC_SET_V8HI:
    case IX86_BUILTIN_VEC_SET_V4HI:
    case IX86_BUILTIN_VEC_SET_V16QI:
      return ix86_expand_vec_set_builtin (exp);

    case IX86_BUILTIN_INFQ:
    case IX86_BUILTIN_HUGE_VALQ:
      {
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);

	tmp = validize_mem (force_const_mem (mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (mode);

	emit_move_insn (target, tmp);
	return target;
      }
    case IX86_BUILTIN_RDPMC:
    case IX86_BUILTIN_RDTSC:
    case IX86_BUILTIN_RDTSCP:

      op0 = gen_reg_rtx (DImode);
      op1 = gen_reg_rtx (DImode);

      if (fcode == IX86_BUILTIN_RDPMC)
	{
	  arg0 = CALL_EXPR_ARG (exp, 0);
	  op2 = expand_normal (arg0);
	  if (!register_operand (op2, SImode))
	    op2 = copy_to_mode_reg (SImode, op2);

	  insn = (TARGET_64BIT
		  ? gen_rdpmc_rex64 (op0, op1, op2)
		  : gen_rdpmc (op0, op2));
	  emit_insn (insn);
	}
      else if (fcode == IX86_BUILTIN_RDTSC)
	{
	  insn = (TARGET_64BIT
		  ? gen_rdtsc_rex64 (op0, op1)
		  : gen_rdtsc (op0));
	  emit_insn (insn);
	}
      else
	{
	  op2 = gen_reg_rtx (SImode);

	  insn = (TARGET_64BIT
		  ? gen_rdtscp_rex64 (op0, op1, op2)
		  : gen_rdtscp (op0, op2));
	  emit_insn (insn);

	  arg0 = CALL_EXPR_ARG (exp, 0);
	  op4 = expand_normal (arg0);
	  if (!address_operand (op4, VOIDmode))
	    {
	      op4 = convert_memory_address (Pmode, op4);
	      op4 = copy_addr_to_reg (op4);
	    }
	  emit_move_insn (gen_rtx_MEM (SImode, op4), op2);
	}

      if (target == 0)
	target = gen_reg_rtx (mode);
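      /* On 64-bit targets the patterns above return the 64-bit counter
	 split across two DImode registers (low 32 bits in op0, high 32
	 bits in op1); the shift/IOR below folds them back into op0.  */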
      if (TARGET_64BIT)
	{
	  op1 = expand_simple_binop (DImode, ASHIFT, op1, GEN_INT (32),
				     op1, 1, OPTAB_DIRECT);
	  op0 = expand_simple_binop (DImode, IOR, op0, op1,
				     op0, 1, OPTAB_DIRECT);
	}

      emit_move_insn (target, op0);
      return target;
    case IX86_BUILTIN_FXSAVE:
    case IX86_BUILTIN_FXRSTOR:
    case IX86_BUILTIN_FXSAVE64:
    case IX86_BUILTIN_FXRSTOR64:
      switch (fcode)
	{
	case IX86_BUILTIN_FXSAVE:
	  icode = CODE_FOR_fxsave;
	  break;
	case IX86_BUILTIN_FXRSTOR:
	  icode = CODE_FOR_fxrstor;
	  break;
	case IX86_BUILTIN_FXSAVE64:
	  icode = CODE_FOR_fxsave64;
	  break;
	case IX86_BUILTIN_FXRSTOR64:
	  icode = CODE_FOR_fxrstor64;
	  break;
	default:
	  gcc_unreachable ();
	}

      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);

      if (!address_operand (op0, VOIDmode))
	{
	  op0 = convert_memory_address (Pmode, op0);
	  op0 = copy_addr_to_reg (op0);
	}
      op0 = gen_rtx_MEM (BLKmode, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return 0;
    case IX86_BUILTIN_XSAVE:
    case IX86_BUILTIN_XRSTOR:
    case IX86_BUILTIN_XSAVE64:
    case IX86_BUILTIN_XRSTOR64:
    case IX86_BUILTIN_XSAVEOPT:
    case IX86_BUILTIN_XSAVEOPT64:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);

      if (!address_operand (op0, VOIDmode))
	{
	  op0 = convert_memory_address (Pmode, op0);
	  op0 = copy_addr_to_reg (op0);
	}
      op0 = gen_rtx_MEM (BLKmode, op0);

      op1 = force_reg (DImode, op1);

      if (TARGET_64BIT)
	{
	  op2 = expand_simple_binop (DImode, LSHIFTRT, op1, GEN_INT (32),
				     NULL, 1, OPTAB_DIRECT);
	  switch (fcode)
	    {
	    case IX86_BUILTIN_XSAVE:
	      icode = CODE_FOR_xsave_rex64;
	      break;
	    case IX86_BUILTIN_XRSTOR:
	      icode = CODE_FOR_xrstor_rex64;
	      break;
	    case IX86_BUILTIN_XSAVE64:
	      icode = CODE_FOR_xsave64;
	      break;
	    case IX86_BUILTIN_XRSTOR64:
	      icode = CODE_FOR_xrstor64;
	      break;
	    case IX86_BUILTIN_XSAVEOPT:
	      icode = CODE_FOR_xsaveopt_rex64;
	      break;
	    case IX86_BUILTIN_XSAVEOPT64:
	      icode = CODE_FOR_xsaveopt64;
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  op2 = gen_lowpart (SImode, op2);
	  op1 = gen_lowpart (SImode, op1);
	  pat = GEN_FCN (icode) (op0, op1, op2);
	}
      else
	{
	  switch (fcode)
	    {
	    case IX86_BUILTIN_XSAVE:
	      icode = CODE_FOR_xsave;
	      break;
	    case IX86_BUILTIN_XRSTOR:
	      icode = CODE_FOR_xrstor;
	      break;
	    case IX86_BUILTIN_XSAVEOPT:
	      icode = CODE_FOR_xsaveopt;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  pat = GEN_FCN (icode) (op0, op1);
	}

      if (pat)
	emit_insn (pat);
      return 0;
    case IX86_BUILTIN_LLWPCB:
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      icode = CODE_FOR_lwp_llwpcb;
      if (!insn_data[icode].operand[0].predicate (op0, Pmode))
	op0 = force_reg (Pmode, convert_to_mode (Pmode, op0, 1));
      emit_insn (gen_lwp_llwpcb (op0));
      return 0;

    case IX86_BUILTIN_SLWPCB:
      icode = CODE_FOR_lwp_slwpcb;
      if (!target
	  || !insn_data[icode].operand[0].predicate (target, Pmode))
	target = gen_reg_rtx (Pmode);
      emit_insn (gen_lwp_slwpcb (target));
      return target;
    case IX86_BUILTIN_BEXTRI32:
    case IX86_BUILTIN_BEXTRI64:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      icode = (fcode == IX86_BUILTIN_BEXTRI32
	       ? CODE_FOR_tbm_bextri_si
	       : CODE_FOR_tbm_bextri_di);
      if (!CONST_INT_P (op1))
	{
	  error ("last argument must be an immediate");
	  return const0_rtx;
	}
      else
	{
	  unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
	  unsigned char lsb_index = INTVAL (op1) & 0xFF;
	  op1 = GEN_INT (length);
	  op2 = GEN_INT (lsb_index);
	  pat = GEN_FCN (icode) (target, op0, op1, op2);
	  if (pat)
	    emit_insn (pat);
	  return target;
	}
    case IX86_BUILTIN_RDRAND16_STEP:
      icode = CODE_FOR_rdrandhi_1;
      mode0 = HImode;
      goto rdrand_step;

    case IX86_BUILTIN_RDRAND32_STEP:
      icode = CODE_FOR_rdrandsi_1;
      mode0 = SImode;
      goto rdrand_step;

    case IX86_BUILTIN_RDRAND64_STEP:
      icode = CODE_FOR_rdranddi_1;
      mode0 = DImode;

rdrand_step:
      op0 = gen_reg_rtx (mode0);
      emit_insn (GEN_FCN (icode) (op0));

      arg0 = CALL_EXPR_ARG (exp, 0);
      op1 = expand_normal (arg0);
      if (!address_operand (op1, VOIDmode))
	{
	  op1 = convert_memory_address (Pmode, op1);
	  op1 = copy_addr_to_reg (op1);
	}
      emit_move_insn (gen_rtx_MEM (mode0, op1), op0);

      op1 = gen_reg_rtx (SImode);
      emit_move_insn (op1, CONST1_RTX (SImode));
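      /* rdrand sets CF on success and documents that the destination
	 register is zeroed on failure, so the conditional move emitted
	 below yields the step's status directly: the GEU test on the
	 carry-only CCCmode flags is true exactly when CF is clear,
	 selecting the (zeroed) result on failure and 1 on success.  */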
      /* Emit SImode conditional move.  */
      if (mode0 == HImode)
	{
	  op2 = gen_reg_rtx (SImode);
	  emit_insn (gen_zero_extendhisi2 (op2, op0));
	}
      else if (mode0 == SImode)
	op2 = op0;
      else
	op2 = gen_rtx_SUBREG (SImode, op0, 0);

      if (target == 0)
	target = gen_reg_rtx (SImode);

      pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
			 const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
      return target;
    case IX86_BUILTIN_RDSEED16_STEP:
      icode = CODE_FOR_rdseedhi_1;
      mode0 = HImode;
      goto rdseed_step;

    case IX86_BUILTIN_RDSEED32_STEP:
      icode = CODE_FOR_rdseedsi_1;
      mode0 = SImode;
      goto rdseed_step;

    case IX86_BUILTIN_RDSEED64_STEP:
      icode = CODE_FOR_rdseeddi_1;
      mode0 = DImode;

rdseed_step:
      op0 = gen_reg_rtx (mode0);
      emit_insn (GEN_FCN (icode) (op0));

      arg0 = CALL_EXPR_ARG (exp, 0);
      op1 = expand_normal (arg0);
      if (!address_operand (op1, VOIDmode))
	{
	  op1 = convert_memory_address (Pmode, op1);
	  op1 = copy_addr_to_reg (op1);
	}
      emit_move_insn (gen_rtx_MEM (mode0, op1), op0);

      op2 = gen_reg_rtx (QImode);

      pat = gen_rtx_LTU (QImode, gen_rtx_REG (CCCmode, FLAGS_REG),
			 const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, op2, pat));

      if (target == 0)
	target = gen_reg_rtx (SImode);

      emit_insn (gen_zero_extendqisi2 (target, op2));
      return target;
    case IX86_BUILTIN_ADDCARRYX32:
      icode = TARGET_ADX ? CODE_FOR_adcxsi3 : CODE_FOR_addsi3_carry;
      mode0 = SImode;
      goto addcarryx;

    case IX86_BUILTIN_ADDCARRYX64:
      icode = TARGET_ADX ? CODE_FOR_adcxdi3 : CODE_FOR_adddi3_carry;
      mode0 = DImode;

addcarryx:
      arg0 = CALL_EXPR_ARG (exp, 0); /* unsigned char c_in.  */
      arg1 = CALL_EXPR_ARG (exp, 1); /* unsigned int src1.  */
      arg2 = CALL_EXPR_ARG (exp, 2); /* unsigned int src2.  */
      arg3 = CALL_EXPR_ARG (exp, 3); /* unsigned int *sum_out.  */

      op0 = gen_reg_rtx (QImode);

      /* Generate CF from input operand.  */
      op1 = expand_normal (arg0);
      op1 = copy_to_mode_reg (QImode, convert_to_mode (QImode, op1, 1));
      emit_insn (gen_addqi3_cc (op0, op1, constm1_rtx));
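      /* The QImode add of -1 (0xff) above carries out exactly when C_IN
	 is nonzero, so this reconstructs the incoming carry in the flags
	 register without a branch.  */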
      /* Generate the ADCX instruction to compute X+Y+CF.  */
      op2 = expand_normal (arg1);
      op3 = expand_normal (arg2);

      if (!REG_P (op2))
	op2 = copy_to_mode_reg (mode0, op2);
      if (!REG_P (op3))
	op3 = copy_to_mode_reg (mode0, op3);

      op0 = gen_reg_rtx (mode0);

      op4 = gen_rtx_REG (CCCmode, FLAGS_REG);
      pat = gen_rtx_LTU (VOIDmode, op4, const0_rtx);
      emit_insn (GEN_FCN (icode) (op0, op2, op3, op4, pat));

      /* Store the result.  */
      op4 = expand_normal (arg3);
      if (!address_operand (op4, VOIDmode))
	{
	  op4 = convert_memory_address (Pmode, op4);
	  op4 = copy_addr_to_reg (op4);
	}
      emit_move_insn (gen_rtx_MEM (mode0, op4), op0);

      /* Return current CF value.  */
      if (target == 0)
	target = gen_reg_rtx (QImode);

      PUT_MODE (pat, QImode);
      emit_insn (gen_rtx_SET (VOIDmode, target, pat));
      return target;
    case IX86_BUILTIN_GATHERSIV2DF:
      icode = CODE_FOR_avx2_gathersiv2df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4DF:
      icode = CODE_FOR_avx2_gathersiv4df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV2DF:
      icode = CODE_FOR_avx2_gatherdiv2df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4DF:
      icode = CODE_FOR_avx2_gatherdiv4df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4SF:
      icode = CODE_FOR_avx2_gathersiv4sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV8SF:
      icode = CODE_FOR_avx2_gathersiv8sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4SF:
      icode = CODE_FOR_avx2_gatherdiv4sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV8SF:
      icode = CODE_FOR_avx2_gatherdiv8sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV2DI:
      icode = CODE_FOR_avx2_gathersiv2di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4DI:
      icode = CODE_FOR_avx2_gathersiv4di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV2DI:
      icode = CODE_FOR_avx2_gatherdiv2di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4DI:
      icode = CODE_FOR_avx2_gatherdiv4di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4SI:
      icode = CODE_FOR_avx2_gathersiv4si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV8SI:
      icode = CODE_FOR_avx2_gathersiv8si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4SI:
      icode = CODE_FOR_avx2_gatherdiv4si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV8SI:
      icode = CODE_FOR_avx2_gatherdiv8si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTSIV4DF:
      icode = CODE_FOR_avx2_gathersiv4df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTDIV8SF:
      icode = CODE_FOR_avx2_gatherdiv8sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTSIV4DI:
      icode = CODE_FOR_avx2_gathersiv4di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTDIV8SI:
      icode = CODE_FOR_avx2_gatherdiv8si;
      goto gather_gen;
    gather_gen:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      arg3 = CALL_EXPR_ARG (exp, 3);
      arg4 = CALL_EXPR_ARG (exp, 4);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      op3 = expand_normal (arg3);
      op4 = expand_normal (arg4);
      /* Note the arg order is different from the operand order.  */
      mode0 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[3].mode;
      mode3 = insn_data[icode].operand[4].mode;
      mode4 = insn_data[icode].operand[5].mode;

      if (target == NULL_RTX
	  || GET_MODE (target) != insn_data[icode].operand[0].mode)
	subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
      else
	subtarget = target;

      if (fcode == IX86_BUILTIN_GATHERALTSIV4DF
	  || fcode == IX86_BUILTIN_GATHERALTSIV4DI)
	{
	  rtx half = gen_reg_rtx (V4SImode);
	  if (!nonimmediate_operand (op2, V8SImode))
	    op2 = copy_to_mode_reg (V8SImode, op2);
	  emit_insn (gen_vec_extract_lo_v8si (half, op2));
	  op2 = half;
	}
      else if (fcode == IX86_BUILTIN_GATHERALTDIV8SF
	       || fcode == IX86_BUILTIN_GATHERALTDIV8SI)
	{
	  rtx (*gen) (rtx, rtx);
	  rtx half = gen_reg_rtx (mode0);
	  if (mode0 == V4SFmode)
	    gen = gen_vec_extract_lo_v8sf;
	  else
	    gen = gen_vec_extract_lo_v8si;
	  if (!nonimmediate_operand (op0, GET_MODE (op0)))
	    op0 = copy_to_mode_reg (GET_MODE (op0), op0);
	  emit_insn (gen (half, op0));
	  op0 = half;
	  /* Use a fresh pseudo for the mask half; reusing HALF here
	     would clobber the source half just extracted into op0.  */
	  half = gen_reg_rtx (mode0);
	  if (!nonimmediate_operand (op3, GET_MODE (op3)))
	    op3 = copy_to_mode_reg (GET_MODE (op3), op3);
	  emit_insn (gen (half, op3));
	  op3 = half;
	}
      /* Force the memory operand to use a base register only here.  We
	 don't want to do that on the memory operands of other builtin
	 functions.  */
      op1 = force_reg (Pmode, convert_to_mode (Pmode, op1, 1));

      if (!insn_data[icode].operand[1].predicate (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);
      if (!insn_data[icode].operand[2].predicate (op1, Pmode))
	op1 = copy_to_mode_reg (Pmode, op1);
      if (!insn_data[icode].operand[3].predicate (op2, mode2))
	op2 = copy_to_mode_reg (mode2, op2);
      if (!insn_data[icode].operand[4].predicate (op3, mode3))
	op3 = copy_to_mode_reg (mode3, op3);
      if (!insn_data[icode].operand[5].predicate (op4, mode4))
	{
	  error ("last argument must be scale 1, 2, 4, 8");
	  return const0_rtx;
	}
      /* Optimize.  If mask is known to have all high bits set,
	 replace op0 with pc_rtx to signal that the instruction
	 overwrites the whole destination and doesn't use its
	 previous contents.  */
      if (optimize)
	{
	  if (TREE_CODE (arg3) == VECTOR_CST)
	    {
	      unsigned int negative = 0;
	      for (i = 0; i < VECTOR_CST_NELTS (arg3); ++i)
		{
		  tree cst = VECTOR_CST_ELT (arg3, i);
		  if (TREE_CODE (cst) == INTEGER_CST
		      && tree_int_cst_sign_bit (cst))
		    negative++;
		  else if (TREE_CODE (cst) == REAL_CST
			   && REAL_VALUE_NEGATIVE (TREE_REAL_CST (cst)))
		    negative++;
		}
	      if (negative == TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg3)))
		op0 = pc_rtx;
	    }
	  else if (TREE_CODE (arg3) == SSA_NAME)
	    {
	      /* Recognize also when mask is like:
		 __v2df src = _mm_setzero_pd ();
		 __v2df mask = _mm_cmpeq_pd (src, src);
		 or
		 __v8sf src = _mm256_setzero_ps ();
		 __v8sf mask = _mm256_cmp_ps (src, src, _CMP_EQ_OQ);
		 as that is a cheaper way to load all ones into
		 a register than having to load a constant from
		 memory.  */
	      gimple def_stmt = SSA_NAME_DEF_STMT (arg3);
	      if (is_gimple_call (def_stmt))
		{
		  tree fndecl = gimple_call_fndecl (def_stmt);
		  if (fndecl
		      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
		    switch ((unsigned int) DECL_FUNCTION_CODE (fndecl))
		      {
		      case IX86_BUILTIN_CMPPD:
		      case IX86_BUILTIN_CMPPS:
		      case IX86_BUILTIN_CMPPD256:
		      case IX86_BUILTIN_CMPPS256:
			if (!integer_zerop (gimple_call_arg (def_stmt, 2)))
			  break;
			/* FALLTHRU */
		      case IX86_BUILTIN_CMPEQPD:
		      case IX86_BUILTIN_CMPEQPS:
			if (initializer_zerop (gimple_call_arg (def_stmt, 0))
			    && initializer_zerop (gimple_call_arg (def_stmt,
								   1)))
			  op0 = pc_rtx;
			break;
		      default:
			break;
		      }
		}
	    }
	}
      pat = GEN_FCN (icode) (subtarget, op0, op1, op2, op3, op4);
      if (!pat)
	return const0_rtx;
      emit_insn (pat);

      if (fcode == IX86_BUILTIN_GATHERDIV8SF
	  || fcode == IX86_BUILTIN_GATHERDIV8SI)
	{
	  enum machine_mode tmode = GET_MODE (subtarget) == V8SFmode
				    ? V4SFmode : V4SImode;
	  if (target == NULL_RTX)
	    target = gen_reg_rtx (tmode);
	  if (tmode == V4SFmode)
	    emit_insn (gen_vec_extract_lo_v8sf (target, subtarget));
	  else
	    emit_insn (gen_vec_extract_lo_v8si (target, subtarget));
	}
      else
	target = subtarget;

      return target;
    case IX86_BUILTIN_XABORT:
      icode = CODE_FOR_xabort;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;
      if (!insn_data[icode].operand[0].predicate (op0, mode0))
	{
	  error ("the xabort's argument must be an 8-bit immediate");
	  return const0_rtx;
	}
      emit_insn (gen_xabort (op0));
      return 0;
    default:
      break;
    }

  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_special_args_builtin (d, exp, target);

  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    if (d->code == fcode)
      switch (fcode)
	{
	case IX86_BUILTIN_FABSQ:
	case IX86_BUILTIN_COPYSIGNQ:
	  if (!TARGET_SSE)
	    /* Emit a normal call if SSE isn't available.  */
	    return expand_call (exp, target, ignore);
	default:
	  return ix86_expand_args_builtin (d, exp, target);
	}

  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_comi (d, exp, target);

  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_pcmpestr (d, exp, target);

  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_pcmpistr (d, exp, target);

  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    if (d->code == fcode)
      return ix86_expand_multi_arg_builtin (d->icode, exp, target,
					    (enum ix86_builtin_func_type)
					    d->flag, d->comparison);

  gcc_unreachable ();
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
ix86_builtin_vectorized_function (tree fndecl, tree type_out,
				  tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;
  enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
:
32395 if (out_mode
== DFmode
&& in_mode
== DFmode
)
32397 if (out_n
== 2 && in_n
== 2)
32398 return ix86_builtins
[IX86_BUILTIN_SQRTPD
];
32399 else if (out_n
== 4 && in_n
== 4)
32400 return ix86_builtins
[IX86_BUILTIN_SQRTPD256
];
32404 case BUILT_IN_SQRTF
:
32405 if (out_mode
== SFmode
&& in_mode
== SFmode
)
32407 if (out_n
== 4 && in_n
== 4)
32408 return ix86_builtins
[IX86_BUILTIN_SQRTPS_NR
];
32409 else if (out_n
== 8 && in_n
== 8)
32410 return ix86_builtins
[IX86_BUILTIN_SQRTPS_NR256
];
32414 case BUILT_IN_IFLOOR
:
32415 case BUILT_IN_LFLOOR
:
32416 case BUILT_IN_LLFLOOR
:
32417 /* The round insn does not trap on denormals. */
32418 if (flag_trapping_math
|| !TARGET_ROUND
)
32421 if (out_mode
== SImode
&& in_mode
== DFmode
)
32423 if (out_n
== 4 && in_n
== 2)
32424 return ix86_builtins
[IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX
];
32425 else if (out_n
== 8 && in_n
== 4)
32426 return ix86_builtins
[IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256
];
32430 case BUILT_IN_IFLOORF
:
32431 case BUILT_IN_LFLOORF
:
32432 case BUILT_IN_LLFLOORF
:
32433 /* The round insn does not trap on denormals. */
32434 if (flag_trapping_math
|| !TARGET_ROUND
)
32437 if (out_mode
== SImode
&& in_mode
== SFmode
)
32439 if (out_n
== 4 && in_n
== 4)
32440 return ix86_builtins
[IX86_BUILTIN_FLOORPS_SFIX
];
32441 else if (out_n
== 8 && in_n
== 8)
32442 return ix86_builtins
[IX86_BUILTIN_FLOORPS_SFIX256
];
32446 case BUILT_IN_ICEIL
:
32447 case BUILT_IN_LCEIL
:
32448 case BUILT_IN_LLCEIL
:
32449 /* The round insn does not trap on denormals. */
32450 if (flag_trapping_math
|| !TARGET_ROUND
)
32453 if (out_mode
== SImode
&& in_mode
== DFmode
)
32455 if (out_n
== 4 && in_n
== 2)
32456 return ix86_builtins
[IX86_BUILTIN_CEILPD_VEC_PACK_SFIX
];
32457 else if (out_n
== 8 && in_n
== 4)
32458 return ix86_builtins
[IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256
];
32462 case BUILT_IN_ICEILF
:
32463 case BUILT_IN_LCEILF
:
32464 case BUILT_IN_LLCEILF
:
32465 /* The round insn does not trap on denormals. */
32466 if (flag_trapping_math
|| !TARGET_ROUND
)
32469 if (out_mode
== SImode
&& in_mode
== SFmode
)
32471 if (out_n
== 4 && in_n
== 4)
32472 return ix86_builtins
[IX86_BUILTIN_CEILPS_SFIX
];
32473 else if (out_n
== 8 && in_n
== 8)
32474 return ix86_builtins
[IX86_BUILTIN_CEILPS_SFIX256
];
    case BUILT_IN_IRINT:
    case BUILT_IN_LRINT:
    case BUILT_IN_LLRINT:
      if (out_mode == SImode && in_mode == DFmode)
	{
	  if (out_n == 4 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
	  else if (out_n == 8 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX256];
	}
      break;

    case BUILT_IN_IRINTF:
    case BUILT_IN_LRINTF:
    case BUILT_IN_LLRINTF:
      if (out_mode == SImode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
	}
      break;

    case BUILT_IN_IROUND:
    case BUILT_IN_LROUND:
    case BUILT_IN_LLROUND:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == SImode && in_mode == DFmode)
	{
	  if (out_n == 4 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX];
	  else if (out_n == 8 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256];
	}
      break;

    case BUILT_IN_IROUNDF:
    case BUILT_IN_LROUNDF:
    case BUILT_IN_LLROUNDF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == SImode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ_SFIX];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ_SFIX256];
	}
      break;
    case BUILT_IN_COPYSIGN:
      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
	}
      break;

    case BUILT_IN_COPYSIGNF:
      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
	}
      break;
    case BUILT_IN_FLOOR:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_FLOORPD];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_FLOORPD256];
	}
      break;

    case BUILT_IN_FLOORF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_FLOORPS];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_FLOORPS256];
	}
      break;

    case BUILT_IN_CEIL:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_CEILPD];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CEILPD256];
	}
      break;

    case BUILT_IN_CEILF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CEILPS];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_CEILPS256];
	}
      break;

    case BUILT_IN_TRUNC:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_TRUNCPD];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
	}
      break;

    case BUILT_IN_TRUNCF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_TRUNCPS];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
	}
      break;
    case BUILT_IN_RINT:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_RINTPD];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_RINTPD256];
	}
      break;

    case BUILT_IN_RINTF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_RINTPS];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_RINTPS256];
	}
      break;

    case BUILT_IN_ROUND:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ256];
	}
      break;

    case BUILT_IN_ROUNDF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
	break;

      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ256];
	}
      break;
    case BUILT_IN_FMA:
      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPD];
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
	}
      break;

    case BUILT_IN_FMAF:
      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPS];
	  if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
	}
      break;

    default:
      break;
    }
  /* Dispatch to a handler for a vectorization library.  */
  if (ix86_veclib_handler)
    return ix86_veclib_handler ((enum built_in_function) fn, type_out,
				type_in);

  return NULL_TREE;
}
/* Handler for an SVML-style interface to
   a library with vectorized intrinsics.  */

static tree
ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20];
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* The SVML is suitable for unsafe math only.  */
  if (!flag_unsafe_math_optimizations)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
    case BUILT_IN_LOG10:
    case BUILT_IN_POW:
    case BUILT_IN_TANH:
    case BUILT_IN_TAN:
    case BUILT_IN_ATAN:
    case BUILT_IN_ATAN2:
    case BUILT_IN_ATANH:
    case BUILT_IN_CBRT:
    case BUILT_IN_SINH:
    case BUILT_IN_SIN:
    case BUILT_IN_ASINH:
    case BUILT_IN_ASIN:
    case BUILT_IN_COSH:
    case BUILT_IN_COS:
    case BUILT_IN_ACOSH:
    case BUILT_IN_ACOS:
      if (el_mode != DFmode || n != 2)
	return NULL_TREE;
      break;

    case BUILT_IN_EXPF:
    case BUILT_IN_LOGF:
    case BUILT_IN_LOG10F:
    case BUILT_IN_POWF:
    case BUILT_IN_TANHF:
    case BUILT_IN_TANF:
    case BUILT_IN_ATANF:
    case BUILT_IN_ATAN2F:
    case BUILT_IN_ATANHF:
    case BUILT_IN_CBRTF:
    case BUILT_IN_SINHF:
    case BUILT_IN_SINF:
    case BUILT_IN_ASINHF:
    case BUILT_IN_ASINF:
    case BUILT_IN_COSHF:
    case BUILT_IN_COSF:
    case BUILT_IN_ACOSHF:
    case BUILT_IN_ACOSF:
      if (el_mode != SFmode || n != 4)
	return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }
  bname = IDENTIFIER_POINTER (DECL_NAME (builtin_decl_implicit (fn)));

  if (fn == BUILT_IN_LOGF)
    strcpy (name, "vmlsLn4");
  else if (fn == BUILT_IN_LOG)
    strcpy (name, "vmldLn2");
  else if (n == 4)
    {
      sprintf (name, "vmls%s", bname+10);
      name[strlen (name)-1] = '4';
    }
  else
    sprintf (name, "vmld%s2", bname+10);
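  /* A worked illustration (the "__builtin_" prefix being 10 characters
     is the assumption behind bname+10): for BUILT_IN_SIN, bname+10 is
     "sin", so the code above forms "vmldsin2"; the masking below then
     upper-cases name[4], yielding the SVML entry point "vmldSin2".
     The SFmode variant ends up as "vmlsSin4".  */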
  /* Convert to uppercase.  */
  name[4] &= ~0x20;

  arity = 0;
  for (args = DECL_ARGUMENTS (builtin_decl_implicit (fn));
       args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
			   FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
/* Handler for an ACML-style interface to
   a library with vectorized intrinsics.  */

static tree
ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20] = "__vr.._";
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* The ACML is 64bits only and suitable for unsafe math only as
     it does not correctly support parts of IEEE with the required
     precision such as denormals.  */
  if (!TARGET_64BIT
      || !flag_unsafe_math_optimizations)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_SIN:
    case BUILT_IN_COS:
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
    case BUILT_IN_LOG2:
    case BUILT_IN_LOG10:
      name[4] = 'd';
      name[5] = '2';
      if (el_mode != DFmode
	  || n != 2)
	return NULL_TREE;
      break;

    case BUILT_IN_SINF:
    case BUILT_IN_COSF:
    case BUILT_IN_EXPF:
    case BUILT_IN_POWF:
    case BUILT_IN_LOGF:
    case BUILT_IN_LOG2F:
    case BUILT_IN_LOG10F:
      name[4] = 's';
      name[5] = '4';
      if (el_mode != SFmode
	  || n != 4)
	return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  bname = IDENTIFIER_POINTER (DECL_NAME (builtin_decl_implicit (fn)));
  sprintf (name + 7, "%s", bname+10);
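  /* E.g. for BUILT_IN_SIN the "__vr.._" template has become "__vrd2_"
     above, and appending bname+10 ("sin") produces "__vrd2_sin"; the
     float case yields names such as "__vrs4_sinf".  */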
  arity = 0;
  for (args = DECL_ARGUMENTS (builtin_decl_implicit (fn));
       args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
			   FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
/* Returns a decl of a function that implements gather load with
   memory type MEM_VECTYPE and index type INDEX_TYPE and SCALE.
   Return NULL_TREE if it is not available.  */

static tree
ix86_vectorize_builtin_gather (const_tree mem_vectype,
			       const_tree index_type, int scale)
{
  bool si;
  enum ix86_builtins code;

  if (!TARGET_AVX2)
    return NULL_TREE;

  if ((TREE_CODE (index_type) != INTEGER_TYPE
       && !POINTER_TYPE_P (index_type))
      || (TYPE_MODE (index_type) != SImode
	  && TYPE_MODE (index_type) != DImode))
    return NULL_TREE;

  if (TYPE_PRECISION (index_type) > POINTER_SIZE)
    return NULL_TREE;

  /* v*gather* insn sign extends index to pointer mode.  */
  if (TYPE_PRECISION (index_type) < POINTER_SIZE
      && TYPE_UNSIGNED (index_type))
    return NULL_TREE;

  if (scale <= 0
      || scale > 8
      || (scale & (scale - 1)) != 0)
    return NULL_TREE;
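  /* The (scale & (scale - 1)) test above is the usual power-of-two
     check; combined with the bounds test it accepts exactly the
     hardware-supported scales 1, 2, 4 and 8.  */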
  si = TYPE_MODE (index_type) == SImode;
  switch (TYPE_MODE (mem_vectype))
    {
    case V2DFmode:
      code = si ? IX86_BUILTIN_GATHERSIV2DF : IX86_BUILTIN_GATHERDIV2DF;
      break;
    case V4DFmode:
      code = si ? IX86_BUILTIN_GATHERALTSIV4DF : IX86_BUILTIN_GATHERDIV4DF;
      break;
    case V2DImode:
      code = si ? IX86_BUILTIN_GATHERSIV2DI : IX86_BUILTIN_GATHERDIV2DI;
      break;
    case V4DImode:
      code = si ? IX86_BUILTIN_GATHERALTSIV4DI : IX86_BUILTIN_GATHERDIV4DI;
      break;
    case V4SFmode:
      code = si ? IX86_BUILTIN_GATHERSIV4SF : IX86_BUILTIN_GATHERDIV4SF;
      break;
    case V8SFmode:
      code = si ? IX86_BUILTIN_GATHERSIV8SF : IX86_BUILTIN_GATHERALTDIV8SF;
      break;
    case V4SImode:
      code = si ? IX86_BUILTIN_GATHERSIV4SI : IX86_BUILTIN_GATHERDIV4SI;
      break;
    case V8SImode:
      code = si ? IX86_BUILTIN_GATHERSIV8SI : IX86_BUILTIN_GATHERALTDIV8SI;
      break;
    default:
      return NULL_TREE;
    }

  return ix86_builtins[code];
}
/* Returns a decl for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
			 bool sqrt ATTRIBUTE_UNUSED)
{
  if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
	 && flag_finite_math_only && !flag_trapping_math
	 && flag_unsafe_math_optimizations))
    return NULL_TREE;

  if (md_fn)
    /* Machine dependent builtins.  */
    switch (fn)
      {
	/* Vectorized version of sqrt to rsqrt conversion.  */
      case IX86_BUILTIN_SQRTPS_NR:
	return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];

      case IX86_BUILTIN_SQRTPS_NR256:
	return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];

      default:
	return NULL_TREE;
      }
  else
    /* Normal builtins.  */
    switch (fn)
      {
	/* Sqrt to rsqrt conversion.  */
      case BUILT_IN_SQRTF:
	return ix86_builtins[IX86_BUILTIN_RSQRTF];

      default:
	return NULL_TREE;
      }
}
/* Helper for avx_vpermilps256_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and the imm8+1 for a match.  */

int
avx_vpermilp_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode);
  unsigned mask = 0;
  unsigned char ipar[8] = {};  /* Silence -Wuninitialized warning.  */

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;

  /* Validate that all of the elements are constants, and not totally
     out of range.  Copy the data into an integral array to make the
     subsequent checks easier.  */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
	return 0;
      ei = INTVAL (er);
      if (ei >= nelt)
	return 0;
      ipar[i] = ei;
    }

  switch (mode)
    {
    case V4DFmode:
      /* In the 256-bit DFmode case, we can only move elements within
	 a 128-bit lane.  */
      for (i = 0; i < 2; ++i)
	{
	  if (ipar[i] >= 2)
	    return 0;
	  mask |= ipar[i] << i;
	}
      for (i = 2; i < 4; ++i)
	{
	  if (ipar[i] < 2)
	    return 0;
	  mask |= (ipar[i] - 2) << i;
	}
      break;

    case V8SFmode:
      /* In the 256-bit SFmode case, we have full freedom of movement
	 within the low 128-bit lane, but the high 128-bit lane must
	 mirror the exact same pattern.  */
      for (i = 0; i < 4; ++i)
	if (ipar[i] + 4 != ipar[i + 4])
	  return 0;
      nelt = 4;
      /* FALLTHRU */

    case V2DFmode:
    case V4SFmode:
      /* In the 128-bit case, we've full freedom in the placement of
	 the elements from the source operand.  */
      for (i = 0; i < nelt; ++i)
	mask |= ipar[i] << (i * (nelt / 2));
      break;

    default:
      gcc_unreachable ();
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
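/* For instance, a V4SF PARALLEL (1 0 3 2) uses two bits per element and
   encodes to imm8 = 1 | (0 << 2) | (3 << 4) | (2 << 6) = 0xb1, so the
   helper returns 0xb2 (imm8 + 1).  */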
/* Helper for avx_vperm2f128_v4df_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and the imm8+1 for a match.  */

int
avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
  unsigned mask = 0;
  unsigned char ipar[8] = {};  /* Silence -Wuninitialized warning.  */

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;

  /* Validate that all of the elements are constants, and not totally
     out of range.  Copy the data into an integral array to make the
     subsequent checks easier.  */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
	return 0;
      ei = INTVAL (er);
      if (ei >= 2 * nelt)
	return 0;
      ipar[i] = ei;
    }

  /* Validate that the halves of the permute are halves.  */
  for (i = 0; i < nelt2 - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;
  for (i = nelt2; i < nelt - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;

  /* Reconstruct the mask.  */
  for (i = 0; i < 2; ++i)
    {
      unsigned e = ipar[i * nelt2];
      if (e % nelt2)
	return 0;
      e /= nelt2;
      mask |= e << (i * 4);
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
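/* For instance, a V4DF PARALLEL (0 1 4 5) selects source half 0 and
   source half 2 (the low lane of the second operand), encoding to
   imm8 = 0 | (2 << 4) = 0x20, so the helper returns 0x21.  */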
/* Store OPERAND to the memory after reload is completed.  This means
   that we can't easily use assign_stack_local.  */

rtx
ix86_force_to_memory (enum machine_mode mode, rtx operand)
{
  rtx result;

  gcc_assert (reload_completed);
  if (ix86_using_red_zone ())
    {
      result = gen_rtx_MEM (mode,
			    gen_rtx_PLUS (Pmode,
					  stack_pointer_rtx,
					  GEN_INT (-RED_ZONE_SIZE)));
      emit_move_insn (result, operand);
    }
  else if (TARGET_64BIT)
    {
      switch (mode)
	{
	case HImode:
	case SImode:
	  operand = gen_lowpart (DImode, operand);
	  /* FALLTHRU */
	case DImode:
	  emit_insn (gen_rtx_SET (VOIDmode,
				  gen_rtx_MEM (DImode,
					       gen_rtx_PRE_DEC (DImode,
							stack_pointer_rtx)),
				  operand));
	  break;
	default:
	  gcc_unreachable ();
	}
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  else
    {
      switch (mode)
	{
	case DImode:
	  {
	    rtx operands[2];

	    split_double_mode (mode, &operand, 1, operands, operands + 1);
	    emit_insn (gen_rtx_SET (VOIDmode,
				    gen_rtx_MEM (SImode,
						 gen_rtx_PRE_DEC (Pmode,
							stack_pointer_rtx)),
				    operands[1]));
	    emit_insn (gen_rtx_SET (VOIDmode,
				    gen_rtx_MEM (SImode,
						 gen_rtx_PRE_DEC (Pmode,
							stack_pointer_rtx)),
				    operands[0]));
	  }
	  break;
	case HImode:
	  /* Store HImodes as SImodes.  */
	  operand = gen_lowpart (SImode, operand);
	  /* FALLTHRU */
	case SImode:
	  emit_insn (gen_rtx_SET (VOIDmode,
				  gen_rtx_MEM (GET_MODE (operand),
					       gen_rtx_PRE_DEC (SImode,
							stack_pointer_rtx)),
				  operand));
	  break;
	default:
	  gcc_unreachable ();
	}
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  return result;
}
/* Free operand from the memory.  */
void
ix86_free_from_memory (enum machine_mode mode)
{
  if (!ix86_using_red_zone ())
    {
      int size;

      if (mode == DImode || TARGET_64BIT)
	size = 8;
      else
	size = 4;
      /* Use LEA to deallocate stack space.  In peephole2 it will be converted
	 to pop or add instruction if registers are available.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					    GEN_INT (size))));
    }
}
/* Return a register priority for hard reg REGNO.  */
static int
ix86_register_priority (int hard_regno)
{
  /* ebp and r13 as the base always want a displacement, and r12 as the
     base always wants an index.  So discourage their usage in an
     address.  */
  if (hard_regno == R12_REG || hard_regno == R13_REG)
    return 1;
  if (hard_regno == BP_REG)
    return 1;
  /* New x86-64 int registers result in bigger code size.  Discourage
     them.  */
  if (FIRST_REX_INT_REG <= hard_regno && hard_regno <= LAST_REX_INT_REG)
    return 2;
  /* New x86-64 SSE registers result in bigger code size.  Discourage
     them.  */
  if (FIRST_REX_SSE_REG <= hard_regno && hard_regno <= LAST_REX_SSE_REG)
    return 2;
  /* Usage of AX register results in smaller code.  Prefer it.  */
  if (hard_regno == 0)
    return 4;

  return 3;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.

   Put float CONST_DOUBLE in the constant pool instead of fp regs.
   QImode must go into class Q_REGS.
   Narrow ALL_REGS to GENERAL_REGS.  This supports allowing movsf and
   movdf to do mem-to-mem moves through integer regs.  */

static reg_class_t
ix86_preferred_reload_class (rtx x, reg_class_t regclass)
{
  enum machine_mode mode = GET_MODE (x);

  /* We're only allowed to return a subclass of CLASS.  Many of the
     following checks fail for NO_REGS, so eliminate that early.  */
  if (regclass == NO_REGS)
    return NO_REGS;

  /* All classes can load zeros.  */
  if (x == CONST0_RTX (mode))
    return regclass;

  /* Force constants into memory if we are loading a (nonzero) constant into
     an MMX or SSE register.  This is because there are no MMX/SSE instructions
     to load from a constant.  */
  if (CONSTANT_P (x)
      && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
    return NO_REGS;

  /* Prefer SSE regs only, if we can use them for math.  */
  if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
    return SSE_CLASS_P (regclass) ? regclass : NO_REGS;

  /* Floating-point constants need more complex checks.  */
  if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
    {
      /* General regs can load everything.  */
      if (reg_class_subset_p (regclass, GENERAL_REGS))
	return regclass;

      /* Floats can load 0 and 1 plus some others.  Note that we eliminated
	 zero above.  We only want to wind up preferring 80387 registers if
	 we plan on doing computation with them.  */
      if (TARGET_80387
	  && standard_80387_constant_p (x) > 0)
	{
	  /* Limit class to non-sse.  */
	  if (regclass == FLOAT_SSE_REGS)
	    return FLOAT_REGS;
	  if (regclass == FP_TOP_SSE_REGS)
	    return FP_TOP_REG;
	  if (regclass == FP_SECOND_SSE_REGS)
	    return FP_SECOND_REG;
	  if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
	    return regclass;
	}

      return NO_REGS;
    }

  /* Generally when we see PLUS here, it's the function invariant
     (plus soft-fp const_int).  Which can only be computed into general
     regs.  */
  if (GET_CODE (x) == PLUS)
    return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;

  /* QImode constants are easy to load, but non-constant QImode data
     must go into Q_REGS.  */
  if (GET_MODE (x) == QImode && !CONSTANT_P (x))
    {
      if (reg_class_subset_p (regclass, Q_REGS))
	return regclass;
      if (reg_class_subset_p (Q_REGS, regclass))
	return Q_REGS;
      return NO_REGS;
    }

  return regclass;
}
/* Discourage putting floating-point values in SSE registers unless
   SSE math is being used, and likewise for the 387 registers.  */
static reg_class_t
ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
{
  enum machine_mode mode = GET_MODE (x);

  /* Restrict the output reload class to the register bank that we are doing
     math on.  If we would like not to return a subset of CLASS, reject this
     alternative: if reload cannot do this, it will still use its choice.  */
  if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
    return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;

  if (X87_FLOAT_MODE_P (mode))
    {
      if (regclass == FP_TOP_SSE_REGS)
	return FP_TOP_REG;
      else if (regclass == FP_SECOND_SSE_REGS)
	return FP_SECOND_REG;
      else
	return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
    }

  return regclass;
}
static reg_class_t
ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
		       enum machine_mode mode, secondary_reload_info *sri)
{
  /* Double-word spills from general registers to non-offsettable memory
     references (zero-extended addresses) require special handling.  */
  if (TARGET_64BIT
      && MEM_P (x)
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && rclass == GENERAL_REGS
      && !offsettable_memref_p (x))
    {
      sri->icode = (in_p
		    ? CODE_FOR_reload_noff_load
		    : CODE_FOR_reload_noff_store);
      /* Add the cost of moving address to a temporary.  */
      sri->extra_cost = 1;

      return NO_REGS;
    }

  /* QImode spills from non-QI registers require an
     intermediate register on 32bit targets.  */
  if (!TARGET_64BIT
      && !in_p && mode == QImode
      && (rclass == GENERAL_REGS
	  || rclass == LEGACY_REGS
	  || rclass == NON_Q_REGS
	  || rclass == SIREG
	  || rclass == DIREG
	  || rclass == INDEX_REGS))
    {
      int regno;

      if (REG_P (x))
	regno = REGNO (x);
      else
	regno = -1;

      if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
	regno = true_regnum (x);

      /* Return Q_REGS if the operand is in memory.  */
      if (regno == -1)
	return Q_REGS;
    }

  /* This condition handles corner case where an expression involving
     pointers gets vectorized.  We're trying to use the address of a
     stack slot as a vector initializer.

     (set (reg:V2DI 74 [ vect_cst_.2 ])
	  (vec_duplicate:V2DI (reg/f:DI 20 frame)))

     Eventually frame gets turned into sp+offset like this:

     (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
	  (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
				       (const_int 392 [0x188]))))

     That later gets turned into:

     (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
	  (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
	    (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))

     We'll have the following reload recorded:

     Reload 0: reload_in (DI) =
	       (plus:DI (reg/f:DI 7 sp)
		(mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
	       reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
	       SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
	       reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
	       reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
	       reload_reg_rtx: (reg:V2DI 22 xmm1)

     Which isn't going to work since SSE instructions can't handle scalar
     additions.  Returning GENERAL_REGS forces the addition into an integer
     register and reload can handle subsequent reloads without problems.  */

  if (in_p && GET_CODE (x) == PLUS
      && SSE_CLASS_P (rclass)
      && SCALAR_INT_MODE_P (mode))
    return GENERAL_REGS;

  return NO_REGS;
}
/* Implement TARGET_CLASS_LIKELY_SPILLED_P.  */

static bool
ix86_class_likely_spilled_p (reg_class_t rclass)
{
  switch (rclass)
    {
      case AREG:
      case DREG:
      case CREG:
      case BREG:
      case AD_REGS:
      case CLOBBERED_REGS:
      case Q_REGS:
      case SIREG:
      case DIREG:
      case SSE_FIRST_REG:
      case FP_TOP_REG:
      case FP_SECOND_REG:
	return true;

      default:
	break;
    }

  return false;
}
/* If we are copying between general and FP registers, we need a memory
   location.  The same is true for SSE and MMX registers.

   To optimize register_move_cost performance, allow inline variant.

   The check can't work reliably when one of the CLASSES is a class
   containing registers from multiple units (SSE, MMX, integer).  We avoid
   this by never combining those units in a single alternative in the
   machine description.  Ensure that this constraint holds to avoid
   unexpected surprises.

   When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
   enforce these sanity checks.  */

static inline bool
inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
				enum machine_mode mode, int strict)
{
  if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
      || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
      || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
      || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
      || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
      || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
    {
      gcc_assert (!strict || lra_in_progress);
      return true;
    }

  if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
    return true;

  /* ??? This is a lie.  We do have moves between mmx/general, and for
     mmx/sse2.  But by saying we need secondary memory we discourage the
     register allocator from using the mmx registers unless needed.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
    return true;

  if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
    {
      /* SSE1 doesn't have any direct moves from other classes.  */
      if (!TARGET_SSE2)
	return true;

      /* If the target says that inter-unit moves are more expensive
	 than moving through memory, then don't generate them.  */
      if (!TARGET_INTER_UNIT_MOVES)
	return true;

      /* Between SSE and general, we have moves no larger than word size.  */
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	return true;
    }

  return false;
}

bool
ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
			      enum machine_mode mode, int strict)
{
  return inline_secondary_memory_needed (class1, class2, mode, strict);
}
/* Implement the TARGET_CLASS_MAX_NREGS hook.

   On the 80386, this is the size of MODE in words,
   except in the FP regs, where a single reg is always enough.  */

static unsigned char
ix86_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
{
  if (MAYBE_INTEGER_CLASS_P (rclass))
    {
      if (mode == XFmode)
	return (TARGET_64BIT ? 2 : 3);
      else if (mode == XCmode)
	return (TARGET_64BIT ? 4 : 6);
      else
	return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
    }
  else
    {
      if (COMPLEX_MODE_P (mode))
	return 2;
      else
	return 1;
    }
}
/* Return true if the registers in CLASS cannot represent the change from
   modes FROM to TO.  */

bool
ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			       enum reg_class regclass)
{
  if (from == to)
    return false;

  /* x87 registers can't do subreg at all, as all values are reformatted
     to extended precision.  */
  if (MAYBE_FLOAT_CLASS_P (regclass))
    return true;

  if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
    {
      /* Vector registers do not support QI or HImode loads.  If we don't
	 disallow a change to these modes, reload will assume it's ok to
	 drop the subreg from (subreg:SI (reg:HI 100) 0).  This affects
	 the vec_dupv4hi pattern.  */
      if (GET_MODE_SIZE (from) < 4)
	return true;

      /* Vector registers do not support subreg with nonzero offsets, which
	 are otherwise valid for integer registers.  Since we can't see
	 whether we have a nonzero offset from here, prohibit all
	 nonparadoxical subregs changing size.  */
      if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
	return true;
    }

  return false;
}
/* Return the cost of moving data of mode M between a
   register and memory.  A value of 2 is the default; this cost is
   relative to those in `REGISTER_MOVE_COST'.

   This function is used extensively by register_move_cost that is used to
   build tables at startup.  Make it inline in this case.
   When IN is 2, return maximum of in and out move cost.

   If moving between registers and memory is more expensive than
   between two registers, you should define this macro to express the
   relative cost.

   Model also increased moving costs of QImode registers in non
   Q_REGS classes.  */

static inline int
inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
			 int in)
{
  int cost;

  if (FLOAT_CLASS_P (regclass))
    {
      int index;
      switch (mode)
	{
	case SFmode:
	  index = 0;
	  break;
	case DFmode:
	  index = 1;
	  break;
	case XFmode:
	  index = 2;
	  break;
	default:
	  return 100;
	}
      if (in == 2)
	return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
      return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
    }
  if (SSE_CLASS_P (regclass))
    {
      int index;
      switch (GET_MODE_SIZE (mode))
	{
	case 4:
	  index = 0;
	  break;
	case 8:
	  index = 1;
	  break;
	case 16:
	  index = 2;
	  break;
	default:
	  return 100;
	}
      if (in == 2)
	return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
      return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
    }
  if (MMX_CLASS_P (regclass))
    {
      int index;
      switch (GET_MODE_SIZE (mode))
	{
	case 4:
	  index = 0;
	  break;
	case 8:
	  index = 1;
	  break;
	default:
	  return 100;
	}
      if (in == 2)
	return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
      return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
    }
  switch (GET_MODE_SIZE (mode))
    {
    case 1:
      if (Q_CLASS_P (regclass) || TARGET_64BIT)
	{
	  if (!in)
	    return ix86_cost->int_store[0];
	  if (TARGET_PARTIAL_REG_DEPENDENCY
	      && optimize_function_for_speed_p (cfun))
	    cost = ix86_cost->movzbl_load;
	  else
	    cost = ix86_cost->int_load[0];
	  if (in == 2)
	    return MAX (cost, ix86_cost->int_store[0]);
	  return cost;
	}
      else
	{
	  if (in == 2)
	    return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
	  if (in)
	    return ix86_cost->movzbl_load;
	  else
	    return ix86_cost->int_store[0] + 4;
	}
      break;
    case 2:
      if (in == 2)
	return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
      return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
    default:
      /* Compute number of 32bit moves needed.  TFmode is moved as XFmode.  */
      if (mode == TFmode)
	mode = XFmode;
      if (in == 2)
	cost = MAX (ix86_cost->int_load[2], ix86_cost->int_store[2]);
      else if (in)
	cost = ix86_cost->int_load[2];
      else
	cost = ix86_cost->int_store[2];
      return (cost * (((int) GET_MODE_SIZE (mode)
		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
    }
}

static int
ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
		       bool in)
{
  return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
}
/* Return the cost of moving data from a register in class CLASS1 to
   one in class CLASS2.

   It is not required that the cost always equal 2 when FROM is the same as TO;
   on some machines it is expensive to move between registers if they are not
   general registers.  */

static int
ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
                         reg_class_t class2_i)
{
  enum reg_class class1 = (enum reg_class) class1_i;
  enum reg_class class2 = (enum reg_class) class2_i;

  /* In case we require secondary memory, compute cost of the store followed
     by load.  In order to avoid bad register allocation choices, we need
     for this to be *at least* as high as the symmetric MEMORY_MOVE_COST.  */
  if (inline_secondary_memory_needed (class1, class2, mode, 0))
    {
      int cost = 1;

      cost += inline_memory_move_cost (mode, class1, 2);
      cost += inline_memory_move_cost (mode, class2, 2);

      /* In case of copying from general_purpose_register we may emit multiple
         stores followed by single load causing memory size mismatch stall.
         Count this as arbitrarily high cost of 20.  */
      if (targetm.class_max_nregs (class1, mode)
          > targetm.class_max_nregs (class2, mode))
        cost += 20;

      /* In the case of FP/MMX moves, the registers actually overlap, and we
         have to switch modes in order to treat them differently.  */
      if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
          || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
        cost += 20;

      return cost;
    }

  /* Moves between SSE/MMX and integer unit are expensive.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
      || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))

    /* ??? By keeping returned value relatively high, we limit the number
       of moves between integer and MMX/SSE registers for all targets.
       Additionally, high value prevents problem with x86_modes_tieable_p(),
       where integer modes in MMX/SSE registers are not tieable
       because of missing QImode and HImode moves to, from or between
       MMX/SSE registers.  */
    return MAX (8, ix86_cost->mmxsse_to_integer);

  if (MAYBE_FLOAT_CLASS_P (class1))
    return ix86_cost->fp_move;
  if (MAYBE_SSE_CLASS_P (class1))
    return ix86_cost->sse_move;
  if (MAYBE_MMX_CLASS_P (class1))
    return ix86_cost->mmx_move;
  return 2;
}
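/* Illustration (not part of GCC): the secondary-memory path above prices a
   cross-class copy as a store plus a load, with fixed penalties stacked on
   top.  A minimal sketch of that composition, with hypothetical inputs in
   place of the ix86_cost tables:  */
#if 0
static int
cross_class_cost_sketch (int store_cost, int load_cost,
                         int multi_store_stall, int fp_mmx_overlap)
{
  int cost = 1 + store_cost + load_cost;  /* store followed by load */
  if (multi_store_stall)
    cost += 20;   /* memory size mismatch stall, arbitrary penalty */
  if (fp_mmx_overlap)
    cost += 20;   /* FP/MMX registers overlap; a mode switch is needed */
  return cost;
}
#endif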
/* Return TRUE if hard register REGNO can hold a value of machine-mode
   MODE.  */

bool
ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* Flags and only flags can only hold CCmode values.  */
  if (CC_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;
  if (GET_MODE_CLASS (mode) == MODE_CC
      || GET_MODE_CLASS (mode) == MODE_RANDOM
      || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
    return false;
  if (STACK_REGNO_P (regno))
    return VALID_FP_MODE_P (mode);
  if (SSE_REGNO_P (regno))
    {
      /* We implement the move patterns for all vector modes into and
         out of SSE registers, even when no operation instructions
         are available.  OImode move is available only when AVX is
         enabled.  */
      return ((TARGET_AVX && mode == OImode)
              || VALID_AVX256_REG_MODE (mode)
              || VALID_SSE_REG_MODE (mode)
              || VALID_SSE2_REG_MODE (mode)
              || VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
    }
  if (MMX_REGNO_P (regno))
    {
      /* We implement the move patterns for 3DNOW modes even in MMX mode,
         so if the register is available at all, then we can move data of
         the given mode into or out of it.  */
      return (VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
    }

  if (mode == QImode)
    {
      /* Take care for QImode values - they can be in non-QI regs,
         but then they do cause partial register stalls.  */
      if (TARGET_64BIT || QI_REGNO_P (regno))
        return true;
      if (!TARGET_PARTIAL_REG_STALL)
        return true;
      return !can_create_pseudo_p ();
    }
  /* We handle both integer and floats in the general purpose registers.  */
  else if (VALID_INT_MODE_P (mode))
    return true;
  else if (VALID_FP_MODE_P (mode))
    return true;
  else if (VALID_DFP_MODE_P (mode))
    return true;
  /* Lots of MMX code casts 8 byte vector modes to DImode.  If we then go
     on to use that value in smaller contexts, this can easily force a
     pseudo to be allocated to GENERAL_REGS.  Since this is no worse than
     supporting DImode, allow it.  */
  else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
    return true;

  return false;
}
/* A subroutine of ix86_modes_tieable_p.  Return true if MODE is a
   tieable integer mode.  */

static bool
ix86_tieable_integer_mode_p (enum machine_mode mode)
{
  switch (mode)
    {
    case HImode:
    case SImode:
      return true;

    case QImode:
      return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;

    case DImode:
      return TARGET_64BIT;

    default:
      return false;
    }
}
/* Return true if MODE1 is accessible in a register that can hold MODE2
   without copying.  That is, all register classes that can hold MODE2
   can also hold MODE1.  */

bool
ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  if (mode1 == mode2)
    return true;

  if (ix86_tieable_integer_mode_p (mode1)
      && ix86_tieable_integer_mode_p (mode2))
    return true;

  /* MODE2 being XFmode implies fp stack or general regs, which means we
     can tie any smaller floating point modes to it.  Note that we do not
     tie this with TFmode.  */
  if (mode2 == XFmode)
    return mode1 == SFmode || mode1 == DFmode;

  /* MODE2 being DFmode implies fp stack, general or sse regs, which means
     that we can tie it with SFmode.  */
  if (mode2 == DFmode)
    return mode1 == SFmode;

  /* If MODE2 is only appropriate for an SSE register, then tie with
     any other mode acceptable to SSE registers.  */
  if (GET_MODE_SIZE (mode2) == 32
      && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 32
            && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
  if (GET_MODE_SIZE (mode2) == 16
      && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 16
            && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));

  /* If MODE2 is appropriate for an MMX register, then tie
     with any other mode acceptable to MMX registers.  */
  if (GET_MODE_SIZE (mode2) == 8
      && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 8
            && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));

  return false;
}
/* Return the cost of moving between two registers of mode MODE.  */

static int
ix86_set_reg_reg_cost (enum machine_mode mode)
{
  unsigned int units = UNITS_PER_WORD;

  switch (GET_MODE_CLASS (mode))
    {
    default:
      break;

    case MODE_CC:
      units = GET_MODE_SIZE (CCmode);
      break;

    case MODE_FLOAT:
      if ((TARGET_SSE && mode == TFmode)
          || (TARGET_80387 && mode == XFmode)
          || ((TARGET_80387 || TARGET_SSE2) && mode == DFmode)
          || ((TARGET_80387 || TARGET_SSE) && mode == SFmode))
        units = GET_MODE_SIZE (mode);
      break;

    case MODE_COMPLEX_FLOAT:
      if ((TARGET_SSE && mode == TCmode)
          || (TARGET_80387 && mode == XCmode)
          || ((TARGET_80387 || TARGET_SSE2) && mode == DCmode)
          || ((TARGET_80387 || TARGET_SSE) && mode == SCmode))
        units = GET_MODE_SIZE (mode);
      break;

    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      if ((TARGET_AVX && VALID_AVX256_REG_MODE (mode))
          || (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
          || (TARGET_SSE && VALID_SSE_REG_MODE (mode))
          || (TARGET_MMX && VALID_MMX_REG_MODE (mode)))
        units = GET_MODE_SIZE (mode);
      break;
    }

  /* Return the cost of moving between two registers of mode MODE,
     assuming that the move will be in pieces of at most UNITS bytes.  */
  return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
}
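/* Illustration (not part of GCC): with UNITS picked above, the move is
   priced as COSTS_N_INSNS of the piece count.  Under the assumption stated
   earlier in this file that COSTS_N_INSNS (N) is (N) * 4, a DImode set on a
   32-bit target without a wide move unit costs two instructions' worth:  */
#if 0
#define COSTS_N_INSNS_SKETCH(n) ((n) * 4)   /* mirrors the assumed scale */
static int
set_reg_reg_cost_sketch (int mode_size, int units)
{
  return COSTS_N_INSNS_SKETCH ((mode_size + units - 1) / units);
  /* set_reg_reg_cost_sketch (8, 4) == COSTS_N_INSNS_SKETCH (2) == 8 */
}
#endif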
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
ix86_rtx_costs (rtx x, int code_i, int outer_code_i, int opno, int *total,
                bool speed)
{
  enum rtx_code code = (enum rtx_code) code_i;
  enum rtx_code outer_code = (enum rtx_code) outer_code_i;
  enum machine_mode mode = GET_MODE (x);
  const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;

  switch (code)
    {
    case SET:
      if (register_operand (SET_DEST (x), VOIDmode)
          && reg_or_0_operand (SET_SRC (x), VOIDmode))
        {
          *total = ix86_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
          return true;
        }
      return false;

    case CONST_INT:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
        *total = 3;
      else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
        *total = 2;
      else if (flag_pic && SYMBOLIC_CONST (x)
               && (!TARGET_64BIT
                   || (GET_CODE (x) != LABEL_REF
                       && (GET_CODE (x) != SYMBOL_REF
                           || !SYMBOL_REF_LOCAL_P (x)))))
        *total = 1;
      else
        *total = 0;
      return true;

    case CONST_DOUBLE:
      if (mode == VOIDmode)
        {
          *total = 0;
          return true;
        }
      switch (standard_80387_constant_p (x))
        {
        case 1: /* 0.0 */
          *total = 1;
          return true;
        default: /* Other constants */
        case 2: /* 1.0 */
          *total = 2;
          return true;
        case 0:
        case -1:
          break;
        }
      if (SSE_FLOAT_MODE_P (mode))
        {
    case CONST_VECTOR:
          switch (standard_sse_constant_p (x))
            {
            case 0:
              break;
            case 1:  /* 0: xor eliminates false dependency */
              *total = 0;
              return true;
            default: /* -1: cmp contains false dependency */
              *total = 1;
              return true;
            }
        }
      /* Fall back to (MEM (SYMBOL_REF)), since that's where
         it'll probably end up.  Add a penalty for size.  */
      *total = (COSTS_N_INSNS (1)
                + (flag_pic != 0 && !TARGET_64BIT)
                + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
      return true;
    case ZERO_EXTEND:
      /* The zero extensions is often completely free on x86_64, so make
         it as cheap as possible.  */
      if (TARGET_64BIT && mode == DImode
          && GET_MODE (XEXP (x, 0)) == SImode)
        *total = 1;
      else if (TARGET_ZERO_EXTEND_WITH_AND)
        *total = cost->add;
      else
        *total = cost->movzx;
      return false;

    case SIGN_EXTEND:
      *total = cost->movsx;
      return false;

    case ASHIFT:
      if (SCALAR_INT_MODE_P (mode)
          && GET_MODE_SIZE (mode) < UNITS_PER_WORD
          && CONST_INT_P (XEXP (x, 1)))
        {
          HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
          if (value == 1)
            {
              *total = cost->add;
              return false;
            }
          if ((value == 2 || value == 3)
              && cost->lea <= cost->shift_const)
            {
              *total = cost->lea;
              return false;
            }
        }
      /* FALLTHRU */

    case ROTATE:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATERT:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          /* ??? Should be SSE vector operation cost.  */
          /* At least for published AMD latencies, this really is the same
             as the latency for a simple fpu operation like fabs.  */
          /* V*QImode is emulated with 1-11 insns.  */
          if (mode == V16QImode || mode == V32QImode)
            {
              int count = 11;
              if (TARGET_XOP && mode == V16QImode)
                {
                  /* For XOP we use vpshab, which requires a broadcast of the
                     value to the variable shift insn.  For constants this
                     means a V16Q const in mem; even when we can perform the
                     shift with one insn set the cost to prefer paddb.  */
                  if (CONSTANT_P (XEXP (x, 1)))
                    {
                      *total = (cost->fabs
                                + rtx_cost (XEXP (x, 0), code, 0, speed)
                                + (speed ? 2 : COSTS_N_BYTES (16)));
                      return true;
                    }
                  count = 3;
                }
              else if (TARGET_SSSE3)
                count = 7;
              *total = cost->fabs * count;
            }
          else
            *total = cost->fabs;
        }
      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
        {
          if (CONST_INT_P (XEXP (x, 1)))
            {
              if (INTVAL (XEXP (x, 1)) > 32)
                *total = cost->shift_const + COSTS_N_INSNS (2);
              else
                *total = cost->shift_const * 2;
            }
          else
            {
              if (GET_CODE (XEXP (x, 1)) == AND)
                *total = cost->shift_var * 2;
              else
                *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
            }
        }
      else
        {
          if (CONST_INT_P (XEXP (x, 1)))
            *total = cost->shift_const;
          else
            *total = cost->shift_var;
        }
      return false;
    case FMA:
      {
        rtx sub;

        gcc_assert (FLOAT_MODE_P (mode));
        gcc_assert (TARGET_FMA || TARGET_FMA4);

        /* ??? SSE scalar/vector cost should be used here.  */
        /* ??? Bald assumption that fma has the same cost as fmul.  */
        *total = cost->fmul;
        *total += rtx_cost (XEXP (x, 1), FMA, 1, speed);

        /* Negate in op0 or op2 is free: FMS, FNMA, FNMS.  */
        sub = XEXP (x, 0);
        if (GET_CODE (sub) == NEG)
          sub = XEXP (sub, 0);
        *total += rtx_cost (sub, FMA, 0, speed);

        sub = XEXP (x, 2);
        if (GET_CODE (sub) == NEG)
          sub = XEXP (sub, 0);
        *total += rtx_cost (sub, FMA, 2, speed);
        return true;
      }

    case MULT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        {
          /* ??? SSE scalar cost should be used here.  */
          *total = cost->fmul;
          return false;
        }
      else if (X87_FLOAT_MODE_P (mode))
        {
          *total = cost->fmul;
          return false;
        }
      else if (FLOAT_MODE_P (mode))
        {
          /* ??? SSE vector cost should be used here.  */
          *total = cost->fmul;
          return false;
        }
      else if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          /* V*QImode is emulated with 7-13 insns.  */
          if (mode == V16QImode || mode == V32QImode)
            {
              int extra = 11;
              if (TARGET_XOP && mode == V16QImode)
                extra = 5;
              else if (TARGET_SSSE3)
                extra = 6;
              *total = cost->fmul * 2 + cost->fabs * extra;
            }
          /* V*DImode is emulated with 5-8 insns.  */
          else if (mode == V2DImode || mode == V4DImode)
            {
              if (TARGET_XOP && mode == V2DImode)
                *total = cost->fmul * 2 + cost->fabs * 3;
              else
                *total = cost->fmul * 3 + cost->fabs * 5;
            }
          /* Without sse4.1, we don't have PMULLD; it's emulated with 7
             insns, including two PMULUDQ.  */
          else if (mode == V4SImode && !(TARGET_SSE4_1 || TARGET_AVX))
            *total = cost->fmul * 2 + cost->fabs * 5;
          else
            *total = cost->fmul;
          return false;
        }
      else
        {
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          int nbits;
          if (CONST_INT_P (XEXP (x, 1)))
            {
              unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
              for (nbits = 0; value != 0; value &= value - 1)
                nbits++;
            }
          else
            /* This is arbitrary.  */
            nbits = 7;

          /* Compute costs correctly for widening multiplication.  */
          if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
              && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
                 == GET_MODE_SIZE (mode))
            {
              int is_mulwiden = 0;
              enum machine_mode inner_mode = GET_MODE (op0);

              if (GET_CODE (op0) == GET_CODE (op1))
                is_mulwiden = 1, op1 = XEXP (op1, 0);
              else if (CONST_INT_P (op1))
                {
                  if (GET_CODE (op0) == SIGN_EXTEND)
                    is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
                                  == INTVAL (op1);
                  else
                    is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
                }

              if (is_mulwiden)
                op0 = XEXP (op0, 0), mode = GET_MODE (op0);
            }

          *total = (cost->mult_init[MODE_INDEX (mode)]
                    + nbits * cost->mult_bit
                    + rtx_cost (op0, outer_code, opno, speed)
                    + rtx_cost (op1, outer_code, opno, speed));

          return true;
        }
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        /* ??? SSE cost should be used here.  */
        *total = cost->fdiv;
      else if (X87_FLOAT_MODE_P (mode))
        *total = cost->fdiv;
      else if (FLOAT_MODE_P (mode))
        /* ??? SSE vector cost should be used here.  */
        *total = cost->fdiv;
      else
        *total = cost->divide[MODE_INDEX (mode)];
      return false;

    case PLUS:
      if (GET_MODE_CLASS (mode) == MODE_INT
          && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
        {
          if (GET_CODE (XEXP (x, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
              && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
              && CONSTANT_P (XEXP (x, 1)))
            {
              HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
              if (val == 2 || val == 4 || val == 8)
                {
                  *total = cost->lea;
                  *total += rtx_cost (XEXP (XEXP (x, 0), 1),
                                      outer_code, opno, speed);
                  *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
                                      outer_code, opno, speed);
                  *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
                  return true;
                }
            }
          else if (GET_CODE (XEXP (x, 0)) == MULT
                   && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
            {
              HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
              if (val == 2 || val == 4 || val == 8)
                {
                  *total = cost->lea;
                  *total += rtx_cost (XEXP (XEXP (x, 0), 0),
                                      outer_code, opno, speed);
                  *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
                  return true;
                }
            }
          else if (GET_CODE (XEXP (x, 0)) == PLUS)
            {
              *total = cost->lea;
              *total += rtx_cost (XEXP (XEXP (x, 0), 0),
                                  outer_code, opno, speed);
              *total += rtx_cost (XEXP (XEXP (x, 0), 1),
                                  outer_code, opno, speed);
              *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
              return true;
            }
        }
      /* FALLTHRU */

    case MINUS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        {
          /* ??? SSE cost should be used here.  */
          *total = cost->fadd;
          return false;
        }
      else if (X87_FLOAT_MODE_P (mode))
        {
          *total = cost->fadd;
          return false;
        }
      else if (FLOAT_MODE_P (mode))
        {
          /* ??? SSE vector cost should be used here.  */
          *total = cost->fadd;
          return false;
        }
      /* FALLTHRU */

    case AND:
    case IOR:
    case XOR:
      if (GET_MODE_CLASS (mode) == MODE_INT
          && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
        {
          *total = (cost->add * 2
                    + (rtx_cost (XEXP (x, 0), outer_code, opno, speed)
                       << (GET_MODE (XEXP (x, 0)) != DImode))
                    + (rtx_cost (XEXP (x, 1), outer_code, opno, speed)
                       << (GET_MODE (XEXP (x, 1)) != DImode)));
          return true;
        }
      /* FALLTHRU */

    case NEG:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        {
          /* ??? SSE cost should be used here.  */
          *total = cost->fchs;
          return false;
        }
      else if (X87_FLOAT_MODE_P (mode))
        {
          *total = cost->fchs;
          return false;
        }
      else if (FLOAT_MODE_P (mode))
        {
          /* ??? SSE vector cost should be used here.  */
          *total = cost->fchs;
          return false;
        }
      /* FALLTHRU */

    case NOT:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          /* ??? Should be SSE vector operation cost.  */
          /* At least for published AMD latencies, this really is the same
             as the latency for a simple fpu operation like fabs.  */
          *total = cost->fabs;
        }
      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
        *total = cost->add * 2;
      else
        *total = cost->add;
      return false;

    case COMPARE:
      if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
          && XEXP (XEXP (x, 0), 1) == const1_rtx
          && CONST_INT_P (XEXP (XEXP (x, 0), 2))
          && XEXP (x, 1) == const0_rtx)
        {
          /* This kind of construct is implemented using test[bwl].
             Treat it as if we had an AND.  */
          *total = (cost->add
                    + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, opno, speed)
                    + rtx_cost (const1_rtx, outer_code, opno, speed));
          return true;
        }
      return false;

    case FLOAT_EXTEND:
      if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
        *total = 0;
      return false;

    case ABS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        /* ??? SSE cost should be used here.  */
        *total = cost->fabs;
      else if (X87_FLOAT_MODE_P (mode))
        *total = cost->fabs;
      else if (FLOAT_MODE_P (mode))
        /* ??? SSE vector cost should be used here.  */
        *total = cost->fabs;
      return false;

    case SQRT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        /* ??? SSE cost should be used here.  */
        *total = cost->fsqrt;
      else if (X87_FLOAT_MODE_P (mode))
        *total = cost->fsqrt;
      else if (FLOAT_MODE_P (mode))
        /* ??? SSE vector cost should be used here.  */
        *total = cost->fsqrt;
      return false;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_TP)
        *total = 0;
      return false;

    case VEC_SELECT:
    case VEC_EXTRACT:
    case VEC_MERGE:
    case VEC_DUPLICATE:
      /* ??? Assume all of these vector manipulation patterns are
         recognizable.  In which case they all pretty much have the
         same cost.  */
      *total = cost->fabs;
      return true;

    default:
      return false;
    }
}
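/* Illustration (not part of GCC): the multiply cost above counts the set
   bits of a constant multiplier with the classic Kernighan loop, where
   value &= value - 1 clears the lowest set bit each iteration.  A minimal
   standalone sketch:  */
#if 0
static int
popcount_sketch (unsigned long long value)
{
  int nbits;
  for (nbits = 0; value != 0; value &= value - 1)
    nbits++;                    /* one iteration per set bit */
  return nbits;                 /* e.g. popcount_sketch (10) == 2 */
}
#endif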
#if TARGET_MACHO

static int current_machopic_label_num;

/* Given a symbol name and its associated stub, write out the
   definition of the stub.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *binder_name, *symbol_name, lazy_ptr_name[32];
  int label = ++current_machopic_label_num;

  /* For 64-bit we shouldn't get here.  */
  gcc_assert (!TARGET_64BIT);

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = targetm.strip_name_encoding (symb);

  length = strlen (stub);
  binder_name = XALLOCAVEC (char, length + 32);
  GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  sprintf (lazy_ptr_name, "L%d$lz", label);

  if (MACHOPIC_ATT_STUB)
    switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
  else if (MACHOPIC_PURE)
    switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub_section]);

  fprintf (file, "%s:\n", stub);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

  if (MACHOPIC_ATT_STUB)
    {
      fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
    }
  else if (MACHOPIC_PURE)
    {
      /* PIC stub.  */
      /* 25-byte PIC stub using "CALL get_pc_thunk".  */
      rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
      output_set_got (tmp, NULL_RTX);	/* "CALL ___<cpu>.get_pc_thunk.cx".  */
      fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n",
	       label, lazy_ptr_name, label);
      fprintf (file, "\tjmp\t*%%ecx\n");
    }
  else
    fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);

  /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
     it needs no stub-binding-helper.  */
  if (MACHOPIC_ATT_STUB)
    return;

  fprintf (file, "%s:\n", binder_name);

  if (MACHOPIC_PURE)
    {
      fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
      fprintf (file, "\tpushl\t%%ecx\n");
    }
  else
    fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);

  fputs ("\tjmp\tdyld_stub_binding_helper\n", file);

  /* N.B.  Keep the correspondence of these
     'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
     old-pic/new-pic/non-pic stubs; altering this will break
     compatibility with existing dylibs.  */
  if (MACHOPIC_PURE)
    /* 25-byte PIC stub using "CALL get_pc_thunk".  */
    switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
  else
    /* 16-byte -mdynamic-no-pic stub.  */
    switch_to_section(darwin_sections[machopic_lazy_symbol_ptr3_section]);

  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, ASM_LONG "%s\n", binder_name);
}
#endif /* TARGET_MACHO */
/* Order the registers for register allocator.  */

void
x86_order_regs_for_local_alloc (void)
{
  int pos = 0;
  int i;

  /* First allocate the local general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && call_used_regs[i])
      reg_alloc_order [pos++] = i;

  /* Global general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && !call_used_regs[i])
      reg_alloc_order [pos++] = i;

  /* x87 registers come first in case we are doing FP math
     using them.  */
  if (!TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  /* SSE registers.  */
  for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
    reg_alloc_order [pos++] = i;
  for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
    reg_alloc_order [pos++] = i;

  /* x87 registers.  */
  if (TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  /* MMX registers.  */
  for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
    reg_alloc_order [pos++] = i;

  /* Initialize the rest of array as we do not allocate some registers
     at all.  */
  while (pos < FIRST_PSEUDO_REGISTER)
    reg_alloc_order [pos++] = 0;
}
/* Handle a "callee_pop_aggregate_return" attribute; arguments as
   in struct attribute_spec handler.  */

static tree
ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
					 tree args,
					 int flags ATTRIBUTE_UNUSED,
					 bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }
  if (TARGET_64BIT)
    {
      warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }
  if (is_attribute_p ("callee_pop_aggregate_return", name))
    {
      tree cst;

      cst = TREE_VALUE (args);
      if (TREE_CODE (cst) != INTEGER_CST)
	{
	  warning (OPT_Wattributes,
		   "%qE attribute requires an integer constant argument",
		   name);
	  *no_add_attrs = true;
	}
      else if (compare_tree_int (cst, 0) != 0
	       && compare_tree_int (cst, 1) != 0)
	{
	  warning (OPT_Wattributes,
		   "argument to %qE attribute is neither zero, nor one",
		   name);
	  *no_add_attrs = true;
	}
    }

  return NULL_TREE;
}
/* Handle a "ms_abi" or "sysv" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
ix86_handle_abi_attribute (tree *node, tree name,
			   tree args ATTRIBUTE_UNUSED,
			   int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  /* Can combine regparm with all attributes but fastcall.  */
  if (is_attribute_p ("ms_abi", name))
    {
      if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
        {
	  error ("ms_abi and sysv_abi attributes are not compatible");
	}

      return NULL_TREE;
    }
  else if (is_attribute_p ("sysv_abi", name))
    {
      if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
        {
	  error ("ms_abi and sysv_abi attributes are not compatible");
	}

      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
ix86_handle_struct_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && RECORD_OR_UNION_TYPE_P (*type)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static tree
ix86_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}

static bool
ix86_ms_bitfield_layout_p (const_tree record_type)
{
  return ((TARGET_MS_BITFIELD_LAYOUT
	   && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
          || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
}
/* Returns an expression indicating where the this parameter is
   located on entry to the FUNCTION.  */

static rtx
x86_this_parameter (tree function)
{
  tree type = TREE_TYPE (function);
  bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
  int nregs;

  if (TARGET_64BIT)
    {
      const int *parm_regs;

      if (ix86_function_type_abi (type) == MS_ABI)
        parm_regs = x86_64_ms_abi_int_parameter_registers;
      else
        parm_regs = x86_64_int_parameter_registers;
      return gen_rtx_REG (Pmode, parm_regs[aggr]);
    }

  nregs = ix86_function_regparm (type, function);

  if (nregs > 0 && !stdarg_p (type))
    {
      int regno;
      unsigned int ccvt = ix86_get_callcvt (type);

      if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	regno = aggr ? DX_REG : CX_REG;
      else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	{
	  regno = CX_REG;
	  if (aggr)
	    return gen_rtx_MEM (SImode,
				plus_constant (Pmode, stack_pointer_rtx, 4));
	}
      else
	{
	  regno = AX_REG;
	  if (aggr)
	    {
	      regno = DX_REG;
	      if (nregs == 1)
		return gen_rtx_MEM (SImode,
				    plus_constant (Pmode,
						   stack_pointer_rtx, 4));
	    }
	}
      return gen_rtx_REG (SImode, regno);
    }

  return gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx,
					     aggr ? 8 : 4));
}
/* Determine whether x86_output_mi_thunk can succeed.  */

static bool
x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT vcall_offset, const_tree function)
{
  /* 64-bit can handle anything.  */
  if (TARGET_64BIT)
    return true;

  /* For 32-bit, everything's fine if we have one free register.  */
  if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
    return true;

  /* Need a free register for vcall_offset.  */
  if (vcall_offset)
    return false;

  /* Need a free register for GOT references.  */
  if (flag_pic && !targetm.binds_local_p (function))
    return false;

  /* Otherwise ok.  */
  return true;
}
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
x86_output_mi_thunk (FILE *file,
		     tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset, tree function)
{
  rtx this_param = x86_this_parameter (function);
  rtx this_reg, tmp, fnaddr;
  unsigned int tmp_regno;

  if (TARGET_64BIT)
    tmp_regno = R10_REG;
  else
    {
      unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (function));
      if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	tmp_regno = AX_REG;
      else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	tmp_regno = DX_REG;
      else
	tmp_regno = CX_REG;
    }

  emit_note (NOTE_INSN_PROLOGUE_END);

  /* If VCALL_OFFSET, we'll need THIS in a register.  Might as well
     pull it in now and let DELTA benefit.  */
  if (REG_P (this_param))
    this_reg = this_param;
  else if (vcall_offset)
    {
      /* Put the this parameter into %eax.  */
      this_reg = gen_rtx_REG (Pmode, AX_REG);
      emit_move_insn (this_reg, this_param);
    }
  else
    this_reg = NULL_RTX;

  /* Adjust the this parameter by a fixed constant.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);
      rtx delta_dst = this_reg ? this_reg : this_param;

      if (TARGET_64BIT)
	{
	  if (!x86_64_general_operand (delta_rtx, Pmode))
	    {
	      tmp = gen_rtx_REG (Pmode, tmp_regno);
	      emit_move_insn (tmp, delta_rtx);
	      delta_rtx = tmp;
	    }
	}

      ix86_emit_binop (PLUS, Pmode, delta_dst, delta_rtx);
    }

  /* Adjust the this parameter by a value stored in the vtable.  */
  if (vcall_offset)
    {
      rtx vcall_addr, vcall_mem, this_mem;

      tmp = gen_rtx_REG (Pmode, tmp_regno);

      this_mem = gen_rtx_MEM (ptr_mode, this_reg);
      if (Pmode != ptr_mode)
	this_mem = gen_rtx_ZERO_EXTEND (Pmode, this_mem);
      emit_move_insn (tmp, this_mem);

      /* Adjust the this parameter.  */
      vcall_addr = plus_constant (Pmode, tmp, vcall_offset);
      if (TARGET_64BIT
	  && !ix86_legitimate_address_p (ptr_mode, vcall_addr, true))
	{
	  rtx tmp2 = gen_rtx_REG (Pmode, R11_REG);
	  emit_move_insn (tmp2, GEN_INT (vcall_offset));
	  vcall_addr = gen_rtx_PLUS (Pmode, tmp, tmp2);
	}

      vcall_mem = gen_rtx_MEM (ptr_mode, vcall_addr);
      if (Pmode != ptr_mode)
	emit_insn (gen_addsi_1_zext (this_reg,
				     gen_rtx_REG (ptr_mode,
						  REGNO (this_reg)),
				     vcall_mem));
      else
	ix86_emit_binop (PLUS, Pmode, this_reg, vcall_mem);
    }

  /* If necessary, drop THIS back to its stack slot.  */
  if (this_reg && this_reg != this_param)
    emit_move_insn (this_param, this_reg);

  fnaddr = XEXP (DECL_RTL (function), 0);
  if (TARGET_64BIT)
    {
      if (!flag_pic || targetm.binds_local_p (function)
	  || cfun->machine->call_abi == MS_ABI)
	;
      else
	{
	  tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOTPCREL);
	  tmp = gen_rtx_CONST (Pmode, tmp);
	  fnaddr = gen_rtx_MEM (Pmode, tmp);
	}
    }
  else
    {
      if (!flag_pic || targetm.binds_local_p (function))
	;
#if TARGET_MACHO
      else if (TARGET_MACHO)
	{
	  fnaddr = machopic_indirect_call_target (DECL_RTL (function));
	  fnaddr = XEXP (fnaddr, 0);
	}
#endif /* TARGET_MACHO */
      else
	{
	  tmp = gen_rtx_REG (Pmode, CX_REG);
	  output_set_got (tmp, NULL_RTX);

	  fnaddr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOT);
	  fnaddr = gen_rtx_PLUS (Pmode, fnaddr, tmp);
	  fnaddr = gen_rtx_MEM (Pmode, fnaddr);
	}
    }

  /* Our sibling call patterns do not allow memories, because we have no
     predicate that can distinguish between frame and non-frame memory.
     For our purposes here, we can get away with (ab)using a jump pattern,
     because we're going to do no optimization.  */
  if (MEM_P (fnaddr))
    emit_jump_insn (gen_indirect_jump (fnaddr));
  else
    {
      if (ix86_cmodel == CM_LARGE_PIC && SYMBOLIC_CONST (fnaddr))
	fnaddr = legitimize_pic_address (fnaddr,
					 gen_rtx_REG (Pmode, tmp_regno));

      if (!sibcall_insn_operand (fnaddr, word_mode))
	{
	  tmp = gen_rtx_REG (word_mode, tmp_regno);
	  if (GET_MODE (fnaddr) != word_mode)
	    fnaddr = gen_rtx_ZERO_EXTEND (word_mode, fnaddr);
	  emit_move_insn (tmp, fnaddr);
	  fnaddr = tmp;
	}

      tmp = gen_rtx_MEM (QImode, fnaddr);
      tmp = gen_rtx_CALL (VOIDmode, tmp, const0_rtx);
      tmp = emit_call_insn (tmp);
      SIBLING_CALL_P (tmp) = 1;
    }
  emit_barrier ();

  /* Emit just enough of rest_of_compilation to get the insns emitted.
     Note that use_thunk calls assemble_start_function et al.  */
  tmp = get_insns ();
  shorten_branches (tmp);
  final_start_function (tmp, file, 1);
  final (tmp, file, 1);
  final_end_function ();
}
static void
x86_file_start (void)
{
  default_file_start ();
#if TARGET_MACHO
  darwin_file_start ();
#endif
  if (X86_FILE_START_VERSION_DIRECTIVE)
    fputs ("\t.version\t\"01.01\"\n", asm_out_file);
  if (X86_FILE_START_FLTUSED)
    fputs ("\t.global\t__fltused\n", asm_out_file);
  if (ix86_asm_dialect == ASM_INTEL)
    fputs ("\t.intel_syntax noprefix\n", asm_out_file);
}
int
x86_field_alignment (tree field, int computed)
{
  enum machine_mode mode;
  tree type = TREE_TYPE (field);

  if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
    return computed;
  mode = TYPE_MODE (strip_array_types (type));
  if (mode == DFmode || mode == DCmode
      || GET_MODE_CLASS (mode) == MODE_INT
      || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
    return MIN (32, computed);
  return computed;
}
/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */
void
x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
{
  const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
					 : MCOUNT_NAME);

  if (TARGET_64BIT)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
#endif

      if (DEFAULT_ABI == SYSV_ABI && flag_pic)
	fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
      else
	fprintf (file, "\tcall\t%s\n", mcount_name);
    }
  else if (flag_pic)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
	       LPREFIX, labelno);
#endif
      fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
    }
  else
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
	       LPREFIX, labelno);
#endif
      fprintf (file, "\tcall\t%s\n", mcount_name);
    }
}
/* We don't have exact information about the insn sizes, but we may assume
   quite safely that we are informed about all 1 byte insns and memory
   address sizes.  This is enough to eliminate unnecessary padding in
   99% of cases.  */

static int
min_insn_size (rtx insn)
{
  int l = 0, len;

  if (!INSN_P (insn) || !active_insn_p (insn))
    return 0;

  /* Discard alignments we've emit and jump instructions.  */
  if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
      && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
    return 0;
  if (JUMP_TABLE_DATA_P (insn))
    return 0;

  /* Important case - calls are always 5 bytes.
     It is common to have many calls in the row.  */
  if (CALL_P (insn)
      && symbolic_reference_mentioned_p (PATTERN (insn))
      && !SIBLING_CALL_P (insn))
    return 5;
  len = get_attr_length (insn);
  if (len <= 1)
    return 1;

  /* For normal instructions we rely on get_attr_length being exact,
     with a few exceptions.  */
  if (!JUMP_P (insn))
    {
      enum attr_type type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_MULTI:
	  if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	      || asm_noperands (PATTERN (insn)) >= 0)
	    return 0;
	  break;
	case TYPE_OTHER:
	case TYPE_FCMP:
	  break;
	default:
	  /* Otherwise trust get_attr_length.  */
	  return len;
	}

      l = get_attr_length_address (insn);
      if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
	l = 4;
    }
  if (l)
    return 1+l;
  else
    return 2;
}
#ifdef ASM_OUTPUT_MAX_SKIP_PAD

/* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
   window.  */

static void
ix86_avoid_jump_mispredicts (void)
{
  rtx insn, start = get_insns ();
  int nbytes = 0, njumps = 0;
  int isjump = 0;

  /* Look for all minimal intervals of instructions containing 4 jumps.
     The intervals are bounded by START and INSN.  NBYTES is the total
     size of instructions in the interval including INSN and not including
     START.  When the NBYTES is smaller than 16 bytes, it is possible
     that the end of START and INSN ends up in the same 16byte page.

     The smallest offset in the page INSN can start is the case where START
     ends on the offset 0.  Offset of INSN is then NBYTES - sizeof (INSN).
     We add p2align to 16byte window with maxskip 15 - NBYTES + sizeof (INSN).
     */
  for (insn = start; insn; insn = NEXT_INSN (insn))
    {
      int min_size;

      if (LABEL_P (insn))
	{
	  int align = label_to_alignment (insn);
	  int max_skip = label_to_max_skip (insn);

	  if (max_skip > 15)
	    max_skip = 15;
	  /* If align > 3, only up to 16 - max_skip - 1 bytes can be
	     already in the current 16 byte page, because otherwise
	     ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
	     bytes to reach 16 byte boundary.  */
	  if (align <= 0
	      || (align <= 3 && max_skip != (1 << align) - 1))
	    max_skip = 0;
	  if (dump_file)
	    fprintf (dump_file, "Label %i with max_skip %i\n",
		     INSN_UID (insn), max_skip);
	  if (max_skip)
	    {
	      while (nbytes + max_skip >= 16)
		{
		  start = NEXT_INSN (start);
		  if ((JUMP_P (start)
		       && GET_CODE (PATTERN (start)) != ADDR_VEC
		       && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
		      || CALL_P (start))
		    njumps--, isjump = 1;
		  else
		    isjump = 0;
		  nbytes -= min_insn_size (start);
		}
	    }
	  continue;
	}

      min_size = min_insn_size (insn);
      nbytes += min_size;
      if (dump_file)
	fprintf (dump_file, "Insn %i estimated to %i bytes\n",
		 INSN_UID (insn), min_size);
      if ((JUMP_P (insn)
	   && GET_CODE (PATTERN (insn)) != ADDR_VEC
	   && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
	  || CALL_P (insn))
	njumps++;
      else
	continue;

      while (njumps > 3)
	{
	  start = NEXT_INSN (start);
	  if ((JUMP_P (start)
	       && GET_CODE (PATTERN (start)) != ADDR_VEC
	       && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
	      || CALL_P (start))
	    njumps--, isjump = 1;
	  else
	    isjump = 0;
	  nbytes -= min_insn_size (start);
	}
      gcc_assert (njumps >= 0);
      if (dump_file)
	fprintf (dump_file, "Interval %i to %i has %i bytes\n",
		 INSN_UID (start), INSN_UID (insn), nbytes);

      if (njumps == 3 && isjump && nbytes < 16)
	{
	  int padsize = 15 - nbytes + min_insn_size (insn);

	  if (dump_file)
	    fprintf (dump_file, "Padding insn %i by %i bytes!\n",
		     INSN_UID (insn), padsize);
	  emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
	}
    }
}
#endif
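/* Illustration (not part of GCC): the pass above maintains a sliding window
   [START, INSN] and shrinks it from the front whenever it holds more than
   three jumps.  A minimal sketch of the same two-pointer scheme over plain
   byte-size and is-jump arrays (hypothetical inputs):  */
#if 0
static void
window_sketch (const int *size, const int *is_jump, int n)
{
  int start = 0, nbytes = 0, njumps = 0, i;
  for (i = 0; i < n; i++)
    {
      nbytes += size[i];
      njumps += is_jump[i];
      while (njumps > 3)        /* drop insns from the front of the window */
	{
	  nbytes -= size[start];
	  njumps -= is_jump[start];
	  start++;
	}
      /* Here the window [start, i] holds at most 4 jumps; if it also fits
	 in under 16 bytes, the real pass would emit padding before I.  */
    }
}
#endif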
/* AMD Athlon works faster
   when RET is not destination of conditional jump or directly preceded
   by other jump instruction.  We avoid the penalty by inserting NOP just
   before the RET instructions in such cases.  */
static void
ix86_pad_returns (void)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    {
      basic_block bb = e->src;
      rtx ret = BB_END (bb);
      rtx prev;
      bool replace = false;

      if (!JUMP_P (ret) || !ANY_RETURN_P (PATTERN (ret))
	  || optimize_bb_for_size_p (bb))
	continue;
      for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
	if (active_insn_p (prev) || LABEL_P (prev))
	  break;
      if (prev && LABEL_P (prev))
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->preds)
	    if (EDGE_FREQUENCY (e) && e->src->index >= 0
		&& !(e->flags & EDGE_FALLTHRU))
	      {
		replace = true;
		break;
	      }
	}
      if (!replace)
	{
	  prev = prev_active_insn (ret);
	  if (prev
	      && ((JUMP_P (prev) && any_condjump_p (prev))
		  || CALL_P (prev)))
	    replace = true;
	  /* Empty functions get branch mispredict even when
	     the jump destination is not visible to us.  */
	  if (!prev && !optimize_function_for_size_p (cfun))
	    replace = true;
	}
      if (replace)
	{
	  emit_jump_insn_before (gen_simple_return_internal_long (), ret);
	  delete_insn (ret);
	}
    }
}
/* Count the minimum number of instructions in BB.  Return 4 if the
   number of instructions >= 4.  */

static int
ix86_count_insn_bb (basic_block bb)
{
  rtx insn;
  int insn_count = 0;

  /* Count number of instructions in this block.  Return 4 if the number
     of instructions >= 4.  */
  FOR_BB_INSNS (bb, insn)
    {
      /* Only happen in exit blocks.  */
      if (JUMP_P (insn)
	  && ANY_RETURN_P (PATTERN (insn)))
	break;

      if (NONDEBUG_INSN_P (insn)
	  && GET_CODE (PATTERN (insn)) != USE
	  && GET_CODE (PATTERN (insn)) != CLOBBER)
	{
	  insn_count++;
	  if (insn_count >= 4)
	    return insn_count;
	}
    }

  return insn_count;
}
/* Count the minimum number of instructions in code path in BB.
   Return 4 if the number of instructions >= 4.  */

static int
ix86_count_insn (basic_block bb)
{
  edge e;
  edge_iterator ei;
  int min_prev_count;

  /* Only bother counting instructions along paths with no
     more than 2 basic blocks between entry and exit.  Given
     that BB has an edge to exit, determine if a predecessor
     of BB has an edge from entry.  If so, compute the number
     of instructions in the predecessor block.  If there
     happen to be multiple such blocks, compute the minimum.  */
  min_prev_count = 4;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      edge prev_e;
      edge_iterator prev_ei;

      if (e->src == ENTRY_BLOCK_PTR)
	{
	  min_prev_count = 0;
	  break;
	}
      FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
	{
	  if (prev_e->src == ENTRY_BLOCK_PTR)
	    {
	      int count = ix86_count_insn_bb (e->src);
	      if (count < min_prev_count)
		min_prev_count = count;
	      break;
	    }
	}
    }

  if (min_prev_count < 4)
    min_prev_count += ix86_count_insn_bb (bb);

  return min_prev_count;
}
/* Pad short function to 4 instructions.   */

static void
ix86_pad_short_function (void)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    {
      rtx ret = BB_END (e->src);
      if (JUMP_P (ret) && ANY_RETURN_P (PATTERN (ret)))
	{
	  int insn_count = ix86_count_insn (e->src);

	  /* Pad short function.  */
	  if (insn_count < 4)
	    {
	      rtx insn = ret;

	      /* Find epilogue.  */
	      while (insn
		     && (!NOTE_P (insn)
			 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
		insn = PREV_INSN (insn);

	      if (!insn)
		insn = ret;

	      /* Two NOPs count as one instruction.  */
	      insn_count = 2 * (4 - insn_count);
	      emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
	    }
	}
    }
}
/* Implement machine specific optimizations.  We implement padding of returns
   for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window.  */
static void
ix86_reorg (void)
{
  /* We are freeing block_for_insn in the toplev to keep compatibility
     with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
  compute_bb_for_insn ();

  if (optimize && optimize_function_for_speed_p (cfun))
    {
      if (TARGET_PAD_SHORT_FUNCTION)
	ix86_pad_short_function ();
      else if (TARGET_PAD_RETURNS)
	ix86_pad_returns ();
#ifdef ASM_OUTPUT_MAX_SKIP_PAD
      if (TARGET_FOUR_JUMP_LIMIT)
	ix86_avoid_jump_mispredicts ();
#endif
    }
}
/* Return nonzero when QImode register that must be represented via REX prefix
   is used.  */
bool
x86_extended_QIreg_mentioned_p (rtx insn)
{
  int i;
  extract_insn_cached (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    if (GENERAL_REG_P (recog_data.operand[i])
	&& !QI_REGNO_P (REGNO (recog_data.operand[i])))
       return true;
  return false;
}
/* Return nonzero when P points to register encoded via REX prefix.
   Called via for_each_rtx.  */
static int
extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno;
  if (!REG_P (*p))
    return 0;
  regno = REGNO (*p);
  return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
}

/* Return true when INSN mentions register that must be encoded using REX
   prefix.  */
bool
x86_extended_reg_mentioned_p (rtx insn)
{
  return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
		       extended_reg_mentioned_1, NULL);
}
/* If profitable, negate (without causing overflow) integer constant
   of mode MODE at location LOC.  Return true in this case.  */
bool
x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
{
  HOST_WIDE_INT val;

  if (!CONST_INT_P (*loc))
    return false;

  switch (mode)
    {
    case DImode:
      /* DImode x86_64 constants must fit in 32 bits.  */
      gcc_assert (x86_64_immediate_operand (*loc, mode));

      mode = SImode;
      break;

    case SImode:
    case HImode:
    case QImode:
      break;

    default:
      gcc_unreachable ();
    }

  /* Avoid overflows.  */
  if (mode_signbit_p (mode, *loc))
    return false;

  val = INTVAL (*loc);

  /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
     Exceptions: -128 encodes smaller than 128, so swap sign and op.  */
  if ((val < 0 && val != -128)
      || val == 128)
    {
      *loc = GEN_INT (-val);
      return true;
    }

  return false;
}
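/* Illustration (not part of GCC): on x86 an 8-bit immediate encodes shorter
   than a 32-bit one, so `addl $-4' is canonicalized to `subl $4', while
   -128 is left alone because it fits in 8 bits but +128 would not.  A
   standalone predicate with the same decision rule:  */
#if 0
static int
should_negate_sketch (long val)
{
  return (val < 0 && val != -128) || val == 128;
  /* should_negate_sketch (-4)   == 1   -> emit subl $4
     should_negate_sketch (-128) == 0   -> keep addl $-128
     should_negate_sketch (128)  == 1   -> emit subl $-128 */
}
#endif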
/* Generate an unsigned DImode/SImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
x86_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode, inmode;

  inmode = GET_MODE (operands[1]);
  gcc_assert (inmode == SImode || inmode == DImode);

  out = operands[0];
  in = force_reg (inmode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);

  expand_float (out, in, 0);

  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
			    1, OPTAB_DIRECT);
  i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
			    1, OPTAB_DIRECT);
  i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);

  expand_float (f0, i0, 0);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
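/* Illustration (not part of GCC): the negative branch above computes
   (x >> 1) | (x & 1), converts that as a signed value, then doubles the
   result; OR-ing the low bit back in keeps the final rounding correct.
   The same trick in plain C for a 64-bit unsigned to double conversion:  */
#if 0
static double
floatuns_sketch (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;      /* fits in the signed range */
  /* Halve, keeping the low bit so rounding is unaffected, then double.  */
  double f = (double) (long long) ((x >> 1) | (x & 1));
  return f + f;
}
#endif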
/* AVX2 does support 32-byte integer vector operations,
   thus the longest vector we are faced with is V32QImode.  */
#define MAX_VECT_LEN	32

struct expand_vec_perm_d
{
  rtx target, op0, op1;
  unsigned char perm[MAX_VECT_LEN];
  enum machine_mode vmode;
  unsigned char nelt;
  bool one_operand_p;
  bool testing_p;
};

static bool canonicalize_perm (struct expand_vec_perm_d *d);
static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);

/* Get a vector mode of the same size as the original but with elements
   twice as wide.  This is only guaranteed to apply to integral vectors.  */

static inline enum machine_mode
get_mode_wider_vector (enum machine_mode o)
{
  /* ??? Rely on the ordering that genmodes.c gives to vectors.  */
  enum machine_mode n = GET_MODE_WIDER_MODE (o);
  gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
  gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
  return n;
}
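/* Illustration (not part of GCC): the widening step relies on the two
   invariants checked by the asserts above, namely that the element count
   halves while the total byte size stays fixed (e.g. a 16-byte vector of 8
   halfwords widens to a 16-byte vector of 4 words).  A standalone sketch
   of that invariant:  */
#if 0
#include <assert.h>
struct vec_shape { int nelts; int elt_bytes; };
static struct vec_shape
wider_vector_sketch (struct vec_shape v)
{
  struct vec_shape w = { v.nelts / 2, v.elt_bytes * 2 };
  assert (v.nelts == w.nelts * 2);                    /* half the elements */
  assert (v.nelts * v.elt_bytes == w.nelts * w.elt_bytes); /* same size */
  return w;
}
#endif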
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   with all elements equal to VAR.  Return true if successful.  */

static bool
ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
				   rtx target, rtx val)
{
  bool ok;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
	return false;
      /* FALLTHRU */

    case V4DFmode:
    case V4DImode:
    case V8SFmode:
    case V8SImode:
    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      {
	rtx insn, dup;

	/* First attempt to recognize VAL as-is.  */
	dup = gen_rtx_VEC_DUPLICATE (mode, val);
	insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
	if (recog_memoized (insn) < 0)
	  {
	    rtx seq;
	    /* If that fails, force VAL into a register.  */

	    start_sequence ();
	    XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
	    seq = get_insns ();
	    end_sequence ();
	    if (seq)
	      emit_insn_before (seq, insn);

	    ok = recog_memoized (insn) >= 0;
	    gcc_assert (ok);
	  }
      }
      return true;

    case V4HImode:
      if (!mmx_ok)
	return false;
      if (TARGET_SSE || TARGET_3DNOW_A)
	{
	  rtx x;

	  val = gen_lowpart (SImode, val);
	  x = gen_rtx_TRUNCATE (HImode, val);
	  x = gen_rtx_VEC_DUPLICATE (mode, x);
	  emit_insn (gen_rtx_SET (VOIDmode, target, x));
	  return true;
	}
      goto widen;

    case V8QImode:
      if (!mmx_ok)
	return false;
      goto widen;

    case V8HImode:
      if (TARGET_SSE2)
	{
	  struct expand_vec_perm_d dperm;
	  rtx tmp1, tmp2;

	permute:
	  memset (&dperm, 0, sizeof (dperm));
	  dperm.target = target;
	  dperm.vmode = mode;
	  dperm.nelt = GET_MODE_NUNITS (mode);
	  dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
	  dperm.one_operand_p = true;

	  /* Extend to SImode using a paradoxical SUBREG.  */
	  tmp1 = gen_reg_rtx (SImode);
	  emit_move_insn (tmp1, gen_lowpart (SImode, val));

	  /* Insert the SImode value as low element of a V4SImode vector.  */
	  tmp2 = gen_lowpart (V4SImode, dperm.op0);
	  emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));

	  ok = (expand_vec_perm_1 (&dperm)
		|| expand_vec_perm_broadcast_1 (&dperm));
	  gcc_assert (ok);
	  return ok;
	}
      goto widen;

    case V16QImode:
      if (TARGET_SSE2)
	goto permute;
      goto widen;

    widen:
      /* Replicate the value once into the next wider mode and recurse.  */
      {
	enum machine_mode smode, wsmode, wvmode;
	rtx x;

	smode = GET_MODE_INNER (mode);
	wvmode = get_mode_wider_vector (mode);
	wsmode = GET_MODE_INNER (wvmode);

	val = convert_modes (wsmode, smode, val, true);
	x = expand_simple_binop (wsmode, ASHIFT, val,
				 GEN_INT (GET_MODE_BITSIZE (smode)),
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);
	val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);

	x = gen_lowpart (wvmode, target);
	ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
	gcc_assert (ok);
	return ok;
      }

    case V16HImode:
    case V32QImode:
      {
	enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
	rtx x = gen_reg_rtx (hvmode);

	ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
	gcc_assert (ok);

	x = gen_rtx_VEC_CONCAT (mode, x, x);
	emit_insn (gen_rtx_SET (VOIDmode, target, x));
      }
      return true;

    default:
      return false;
    }
}
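/* Illustration (not part of GCC): the "widen" path above replicates a value
   by OR-ing it with a copy of itself shifted left by one element width,
   then recursing at twice the element size.  The same doubling steps in
   plain C, broadcasting one byte through a 64-bit word:  */
#if 0
static unsigned long long
broadcast_byte_sketch (unsigned char v)
{
  unsigned long long x = v;
  x |= x << 8;                  /* QI -> HI: two copies  */
  x |= x << 16;                 /* HI -> SI: four copies */
  x |= x << 32;                 /* SI -> DI: eight copies */
  return x;                     /* 0xAB -> 0xABABABABABABABAB */
}
#endif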
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   whose ONE_VAR element is VAR, and other elements are zero.  Return true
   if successful.  */

static bool
ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
				     rtx target, rtx var, int one_var)
{
  enum machine_mode vsimode;
  rtx new_target;
  rtx x, tmp;
  bool use_vector_set = false;

  switch (mode)
    {
    case V2DImode:
      /* For SSE4.1, we normally use vector set.  But if the second
	 element is zero and inter-unit moves are OK, we use movq
	 instead.  */
      use_vector_set = (TARGET_64BIT && TARGET_SSE4_1
			&& !(TARGET_INTER_UNIT_MOVES
			     && one_var == 0));
      break;
    case V16QImode:
    case V4SImode:
    case V4SFmode:
      use_vector_set = TARGET_SSE4_1;
      break;
    case V8HImode:
      use_vector_set = TARGET_SSE2;
      break;
    case V4HImode:
      use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
      break;
    case V32QImode:
    case V16HImode:
    case V8SImode:
    case V8SFmode:
    case V4DFmode:
      use_vector_set = TARGET_AVX;
      break;
    case V4DImode:
      /* Use ix86_expand_vector_set in 64bit mode only.  */
      use_vector_set = TARGET_AVX && TARGET_64BIT;
      break;
    default:
      break;
    }

  if (use_vector_set)
    {
      emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
      var = force_reg (GET_MODE_INNER (mode), var);
      ix86_expand_vector_set (mmx_ok, target, var, one_var);
      return true;
    }

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok)
	return false;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      if (one_var != 0)
	return false;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V4SFmode:
    case V4SImode:
      if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
	new_target = gen_reg_rtx (mode);
      else
	new_target = target;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_DUPLICATE (mode, var);
      x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
      if (one_var != 0)
	{
	  /* We need to shuffle the value to the correct position, so
	     create a new pseudo to store the intermediate result.  */

	  /* With SSE2, we can use the integer shuffle insns.  */
	  if (mode != V4SFmode && TARGET_SSE2)
	    {
	      emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
					    GEN_INT (one_var == 0 ? 0 : 1),
					    GEN_INT (one_var == 1 ? 0 : 1),
					    GEN_INT (one_var == 2 ? 0 : 1),
					    GEN_INT (one_var == 3 ? 0 : 1)));
	      if (target != new_target)
		emit_move_insn (target, new_target);
	      return true;
	    }

	  /* Otherwise convert the intermediate result to V4SFmode and
	     use the SSE1 shuffle instructions.  */
	  if (mode != V4SFmode)
	    {
	      tmp = gen_reg_rtx (V4SFmode);
	      emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
	    }
	  else
	    tmp = new_target;

	  emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
					  GEN_INT (one_var == 0 ? 0 : 1),
					  GEN_INT (one_var == 1 ? 0 : 1),
					  GEN_INT (one_var == 2 ? 0+4 : 1+4),
					  GEN_INT (one_var == 3 ? 0+4 : 1+4)));

	  if (mode != V4SFmode)
	    emit_move_insn (target, gen_lowpart (V4SImode, tmp));
	  else if (tmp != target)
	    emit_move_insn (target, tmp);
	}
      else if (target != new_target)
	emit_move_insn (target, new_target);
      return true;

    case V8HImode:
    case V16QImode:
      vsimode = V4SImode;
      goto widen;
    case V4HImode:
    case V8QImode:
      if (!mmx_ok)
	return false;
      vsimode = V2SImode;
      goto widen;
    widen:
      if (one_var != 0)
	return false;

      /* Zero extend the variable element to SImode and recurse.  */
      var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);

      x = gen_reg_rtx (vsimode);
      if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
						var, one_var))
	gcc_unreachable ();

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   consisting of the values in VALS.  It is known that all elements
   except ONE_VAR are constants.  Return true if successful.  */

static bool
ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
				 rtx target, rtx vals, int one_var)
{
  rtx var = XVECEXP (vals, 0, one_var);
  enum machine_mode wmode;
  rtx const_vec, x;

  const_vec = copy_rtx (vals);
  XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
  const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));

  switch (mode)
    {
    case V2DFmode:
    case V2DImode:
    case V2SFmode:
    case V2SImode:
      /* For the two element vectors, it's just as easy to use
	 the general case.  */
      return false;

    case V4DImode:
      /* Use ix86_expand_vector_set in 64bit mode only.  */
      if (!TARGET_64BIT)
	return false;
    case V4DFmode:
    case V8SFmode:
    case V8SImode:
    case V16HImode:
    case V32QImode:
    case V4SFmode:
    case V4SImode:
    case V8HImode:
    case V4HImode:
      break;

    case V16QImode:
      if (TARGET_SSE4_1)
	break;
      wmode = V8HImode;
      goto widen;
    case V8QImode:
      wmode = V4HImode;
      goto widen;
    widen:
      /* There's no way to set one QImode entry easily.  Combine
	 the variable value with its adjacent constant value, and
	 promote to an HImode set.  */
      x = XVECEXP (vals, 0, one_var ^ 1);
      if (one_var & 1)
	{
	  var = convert_modes (HImode, QImode, var, true);
	  var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);
	  x = GEN_INT (INTVAL (x) & 0xff);
	}
      else
	{
	  var = convert_modes (HImode, QImode, var, true);
	  x = gen_int_mode (INTVAL (x) << 8, HImode);
	}
      if (x != const0_rtx)
	var = expand_simple_binop (HImode, IOR, var, x, var,
				   1, OPTAB_LIB_WIDEN);

      x = gen_reg_rtx (wmode);
      emit_move_insn (x, gen_lowpart (wmode, const_vec));
      ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }

  emit_move_insn (target, const_vec);
  ix86_expand_vector_set (mmx_ok, target, var, one_var);
  return true;
}
/* A subroutine of ix86_expand_vector_init_general.  Use vector
   concatenate to handle the most general case: all values variable,
   and none identical.  */

static void
ix86_expand_vector_init_concat (enum machine_mode mode,
				rtx target, rtx *ops, int n)
{
  enum machine_mode cmode, hmode = VOIDmode;
  rtx first[8], second[4];
  rtvec v;
  int i, j;

  switch (n)
    {
    case 2:
      switch (mode)
	{
	case V8SImode:
	  cmode = V4SImode;
	  break;
	case V8SFmode:
	  cmode = V4SFmode;
	  break;
	case V4DImode:
	  cmode = V2DImode;
	  break;
	case V4DFmode:
	  cmode = V2DFmode;
	  break;
	case V4SImode:
	  cmode = V2SImode;
	  break;
	case V4SFmode:
	  cmode = V2SFmode;
	  break;
	case V2DImode:
	  cmode = DImode;
	  break;
	case V2SImode:
	  cmode = SImode;
	  break;
	case V2DFmode:
	  cmode = DFmode;
	  break;
	case V2SFmode:
	  cmode = SFmode;
	  break;
	default:
	  gcc_unreachable ();
	}

      if (!register_operand (ops[1], cmode))
	ops[1] = force_reg (cmode, ops[1]);
      if (!register_operand (ops[0], cmode))
	ops[0] = force_reg (cmode, ops[0]);
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_CONCAT (mode, ops[0],
						  ops[1])));
      break;

    case 4:
      switch (mode)
	{
	case V4DImode:
	  cmode = V2DImode;
	  break;
	case V4DFmode:
	  cmode = V2DFmode;
	  break;
	case V4SImode:
	  cmode = V2SImode;
	  break;
	case V4SFmode:
	  cmode = V2SFmode;
	  break;
	default:
	  gcc_unreachable ();
	}
      goto half;

    case 8:
      switch (mode)
	{
	case V8SImode:
	  cmode = V2SImode;
	  hmode = V4SImode;
	  break;
	case V8SFmode:
	  cmode = V2SFmode;
	  hmode = V4SFmode;
	  break;
	default:
	  gcc_unreachable ();
	}
      goto half;

half:
      /* FIXME: We process inputs backward to help RA.  PR 36222.  */
      i = n - 1;
      j = (n >> 1) - 1;
      for (; i > 0; i -= 2, j--)
	{
	  first[j] = gen_reg_rtx (cmode);
	  v = gen_rtvec (2, ops[i - 1], ops[i]);
	  ix86_expand_vector_init (false, first[j],
				   gen_rtx_PARALLEL (cmode, v));
	}

      n >>= 1;
      if (n > 2)
	{
	  gcc_assert (hmode != VOIDmode);
	  for (i = j = 0; i < n; i += 2, j++)
	    {
	      second[j] = gen_reg_rtx (hmode);
	      ix86_expand_vector_init_concat (hmode, second [j],
					      &first [i], 2);
	    }
	  n >>= 1;
	  ix86_expand_vector_init_concat (mode, target, second, n);
	}
      else
	ix86_expand_vector_init_concat (mode, target, first, n);
      break;

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_vector_init_general.  Use vector
   interleave to handle the most general case: all values variable,
   and none identical.  */

static void
ix86_expand_vector_init_interleave (enum machine_mode mode,
				    rtx target, rtx *ops, int n)
{
  enum machine_mode first_imode, second_imode, third_imode, inner_mode;
  int i, j;
  rtx op0, op1;
  rtx (*gen_load_even) (rtx, rtx, rtx);
  rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
  rtx (*gen_interleave_second_low) (rtx, rtx, rtx);

  switch (mode)
    {
    case V8HImode:
      gen_load_even = gen_vec_setv8hi;
      gen_interleave_first_low = gen_vec_interleave_lowv4si;
      gen_interleave_second_low = gen_vec_interleave_lowv2di;
      inner_mode = HImode;
      first_imode = V4SImode;
      second_imode = V2DImode;
      third_imode = VOIDmode;
      break;
    case V16QImode:
      gen_load_even = gen_vec_setv16qi;
      gen_interleave_first_low = gen_vec_interleave_lowv8hi;
      gen_interleave_second_low = gen_vec_interleave_lowv4si;
      inner_mode = QImode;
      first_imode = V8HImode;
      second_imode = V4SImode;
      third_imode = V2DImode;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < n; i++)
    {
      /* Extend the odd elment to SImode using a paradoxical SUBREG.  */
      op0 = gen_reg_rtx (SImode);
      emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));

      /* Insert the SImode value as low element of V4SImode vector.  */
      op1 = gen_reg_rtx (V4SImode);
      op0 = gen_rtx_VEC_MERGE (V4SImode,
			       gen_rtx_VEC_DUPLICATE (V4SImode,
						      op0),
			       CONST0_RTX (V4SImode),
			       const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, op1, op0));

      /* Cast the V4SImode vector back to a vector in orignal mode.  */
      op0 = gen_reg_rtx (mode);
      emit_move_insn (op0, gen_lowpart (mode, op1));

      /* Load even elements into the second positon.  */
      emit_insn (gen_load_even (op0,
				force_reg (inner_mode,
					   ops [i + i + 1]),
				const1_rtx));

      /* Cast vector to FIRST_IMODE vector.  */
      ops[i] = gen_reg_rtx (first_imode);
      emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
    }

  /* Interleave low FIRST_IMODE vectors.  */
  for (i = j = 0; i < n; i += 2, j++)
    {
      op0 = gen_reg_rtx (first_imode);
      emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));

      /* Cast FIRST_IMODE vector to SECOND_IMODE vector.  */
      ops[j] = gen_reg_rtx (second_imode);
      emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
    }

  /* Interleave low SECOND_IMODE vectors.  */
  switch (second_imode)
    {
    case V4SImode:
      for (i = j = 0; i < n / 2; i += 2, j++)
	{
	  op0 = gen_reg_rtx (second_imode);
	  emit_insn (gen_interleave_second_low (op0, ops[i],
						ops[i + 1]));

	  /* Cast the SECOND_IMODE vector to the THIRD_IMODE
	     vector.  */
	  ops[j] = gen_reg_rtx (third_imode);
	  emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
	}
      second_imode = V2DImode;
      gen_interleave_second_low = gen_vec_interleave_lowv2di;
      /* FALLTHRU */

    case V2DImode:
      op0 = gen_reg_rtx (second_imode);
      emit_insn (gen_interleave_second_low (op0, ops[0],
					    ops[1]));

      /* Cast the SECOND_IMODE vector back to a vector on original
	 mode.  */
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_lowpart (mode, op0)));
      break;

    default:
      gcc_unreachable ();
    }
}

/* A subroutine of ix86_expand_vector_init.  Handle the most general case:
   all values variable, and none identical.  */

static void
ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
				 rtx target, rtx vals)
{
  rtx ops[32], op0, op1;
  enum machine_mode half_mode = VOIDmode;
  int n, i;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok && !TARGET_SSE)
	break;
      /* FALLTHRU */

    case V8SFmode:
    case V8SImode:
    case V4DFmode:
    case V4DImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
	ops[i] = XVECEXP (vals, 0, i);
      ix86_expand_vector_init_concat (mode, target, ops, n);
      return;

    case V32QImode:
      half_mode = V16QImode;
      goto half;

    case V16HImode:
      half_mode = V8HImode;
      goto half;

half:
      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
	ops[i] = XVECEXP (vals, 0, i);
      op0 = gen_reg_rtx (half_mode);
      op1 = gen_reg_rtx (half_mode);
      ix86_expand_vector_init_interleave (half_mode, op0, ops,
					  n >> 2);
      ix86_expand_vector_init_interleave (half_mode, op1,
					  &ops[n >> 1], n >> 2);
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_CONCAT (mode, op0, op1)));
      return;

    case V16QImode:
      if (!TARGET_SSE4_1)
	break;
      /* FALLTHRU */

    case V8HImode:
      if (!TARGET_SSE2)
	break;

      /* Don't use ix86_expand_vector_init_interleave if we can't
	 move from GPR to SSE register directly.  */
      if (!TARGET_INTER_UNIT_MOVES)
	break;

      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
	ops[i] = XVECEXP (vals, 0, i);
      ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
      return;

    case V4HImode:
    case V8QImode:
      break;

    default:
      gcc_unreachable ();
    }

  {
    int i, j, n_elts, n_words, n_elt_per_word;
    enum machine_mode inner_mode;
    rtx words[4], shift;

    inner_mode = GET_MODE_INNER (mode);
    n_elts = GET_MODE_NUNITS (mode);
    n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
    n_elt_per_word = n_elts / n_words;
    shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));

    for (i = 0; i < n_words; ++i)
      {
	rtx word = NULL_RTX;

	for (j = 0; j < n_elt_per_word; ++j)
	  {
	    rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
	    elt = convert_modes (word_mode, inner_mode, elt, true);

	    if (j == 0)
	      word = elt;
	    else
	      {
		word = expand_simple_binop (word_mode, ASHIFT, word, shift,
					    word, 1, OPTAB_LIB_WIDEN);
		word = expand_simple_binop (word_mode, IOR, word, elt,
					    word, 1, OPTAB_LIB_WIDEN);
	      }
	  }

	words[i] = word;
      }

    if (n_words == 1)
      emit_move_insn (target, gen_lowpart (mode, words[0]));
    else if (n_words == 2)
      {
	rtx tmp = gen_reg_rtx (mode);
	emit_clobber (tmp);
	emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
	emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
	emit_move_insn (target, tmp);
      }
    else if (n_words == 4)
      {
	rtx tmp = gen_reg_rtx (V4SImode);
	gcc_assert (word_mode == SImode);
	vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
	ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
	emit_move_insn (target, gen_lowpart (mode, tmp));
      }
    else
      gcc_unreachable ();
  }
}

/* Initialize vector TARGET via VALS.  Suppress the use of MMX
   instructions unless MMX_OK is true.  */

void
ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  int i;
  rtx x;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	n_var++, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  /* Constants are best loaded from the constant pool.  */
  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  /* If all values are identical, broadcast the value.  */
  if (all_same
      && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
					    XVECEXP (vals, 0, 0)))
    return;

  /* Values where only one field is non-constant are best loaded from
     the pool and overwritten via move later.  */
  if (n_var == 1)
    {
      if (all_const_zero
	  && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
						  XVECEXP (vals, 0, one_var),
						  one_var))
	return;

      if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
	return;
    }

  ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
}

void
ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  enum machine_mode half_mode;
  bool use_vec_merge = false;
  rtx tmp;
  static rtx (*gen_extract[6][2]) (rtx, rtx)
    = {
	{ gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
	{ gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
	{ gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
	{ gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
	{ gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
	{ gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
      };
  static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
    = {
	{ gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
	{ gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
	{ gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
	{ gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
	{ gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
	{ gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
      };
  int i, j, n;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (mmx_ok)
	{
	  tmp = gen_reg_rtx (GET_MODE_INNER (mode));
	  ix86_expand_vector_extract (true, tmp, target, 1 - elt);
	  if (elt == 0)
	    tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
	  else
	    tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
	  emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
	  return;
	}
      break;

    case V2DImode:
      use_vec_merge = TARGET_SSE4_1 && TARGET_64BIT;
      if (use_vec_merge)
	break;

      tmp = gen_reg_rtx (GET_MODE_INNER (mode));
      ix86_expand_vector_extract (false, tmp, target, 1 - elt);
      if (elt == 0)
	tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
      else
	tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
      return;

    case V2DFmode:
      {
	rtx op0, op1;

	/* For the two element vectors, we implement a VEC_CONCAT with
	   the extraction of the other element.  */

	tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
	tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);

	if (elt == 0)
	  op0 = val, op1 = tmp;
	else
	  op0 = tmp, op1 = val;

	tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
	emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
      }
      return;

    case V4SFmode:
      use_vec_merge = TARGET_SSE4_1;
      if (use_vec_merge)
	break;

      switch (elt)
	{
	case 0:
	  use_vec_merge = true;
	  break;

	case 1:
	  /* tmp = target = A B C D */
	  tmp = copy_to_reg (target);
	  /* target = A A B B */
	  emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
	  /* target = X A B B */
	  ix86_expand_vector_set (false, target, val, 0);
	  /* target = A X C D */
	  emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
					  const1_rtx, const0_rtx,
					  GEN_INT (2+4), GEN_INT (3+4)));
	  return;

	case 2:
	  /* tmp = target = A B C D */
	  tmp = copy_to_reg (target);
	  /* tmp = X B C D */
	  ix86_expand_vector_set (false, tmp, val, 0);
	  /* target = A B X D */
	  emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
					  const0_rtx, const1_rtx,
					  GEN_INT (0+4), GEN_INT (3+4)));
	  return;

	case 3:
	  /* tmp = target = A B C D */
	  tmp = copy_to_reg (target);
	  /* tmp = X B C D */
	  ix86_expand_vector_set (false, tmp, val, 0);
	  /* target = A B C X */
	  emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
					  const0_rtx, const1_rtx,
					  GEN_INT (2+4), GEN_INT (0+4)));
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case V4SImode:
      use_vec_merge = TARGET_SSE4_1;
      if (use_vec_merge)
	break;

      /* Element 0 handled by vec_merge below.  */
      if (elt == 0)
	{
	  use_vec_merge = true;
	  break;
	}

      if (TARGET_SSE2)
	{
	  /* With SSE2, use integer shuffles to swap element 0 and ELT,
	     store into element 0, then shuffle them back.  */
	  rtx order[4];

	  order[0] = GEN_INT (elt);
	  order[1] = const1_rtx;
	  order[2] = const2_rtx;
	  order[3] = GEN_INT (3);
	  order[elt] = const0_rtx;

	  emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
					order[1], order[2], order[3]));

	  ix86_expand_vector_set (false, target, val, 0);

	  emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
					order[1], order[2], order[3]));
	}
      else
	{
	  /* For SSE1, we have to reuse the V4SF code.  */
	  ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
				  gen_lowpart (SFmode, val), elt);
	}
      return;

    case V8HImode:
      use_vec_merge = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
      use_vec_merge = TARGET_SSE4_1;
      break;

    case V8QImode:
      break;

    case V32QImode:
      half_mode = V16QImode;
      j = 0;
      n = 16;
      goto half;

    case V16HImode:
      half_mode = V8HImode;
      j = 1;
      n = 8;
      goto half;

    case V8SImode:
      half_mode = V4SImode;
      j = 2;
      n = 4;
      goto half;

    case V4DImode:
      half_mode = V2DImode;
      j = 3;
      n = 2;
      goto half;

    case V8SFmode:
      half_mode = V4SFmode;
      j = 4;
      n = 4;
      goto half;

    case V4DFmode:
      half_mode = V2DFmode;
      j = 5;
      n = 2;
      goto half;

half:
      /* Compute offset.  */
      i = elt / n;
      elt %= n;

      gcc_assert (i <= 1);

      /* Extract the half.  */
      tmp = gen_reg_rtx (half_mode);
      emit_insn (gen_extract[j][i] (tmp, target));

      /* Put val in tmp at elt.  */
      ix86_expand_vector_set (false, tmp, val, elt);

      /* Put it back.  */
      emit_insn (gen_insert[j][i] (target, target, tmp));
      return;

    default:
      break;
    }

  if (use_vec_merge)
    {
      tmp = gen_rtx_VEC_DUPLICATE (mode, val);
      tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

      emit_move_insn (mem, target);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (tmp, val);

      emit_move_insn (target, mem);
    }
}

void
ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_extr = false;
  rtx tmp;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
	break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      use_vec_extr = true;
      break;

    case V4SFmode:
      use_vec_extr = TARGET_SSE4_1;
      if (use_vec_extr)
	break;

      switch (elt)
	{
	case 0:
	  tmp = vec;
	  break;

	case 1:
	case 3:
	  tmp = gen_reg_rtx (mode);
	  emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
					  GEN_INT (elt), GEN_INT (elt),
					  GEN_INT (elt+4), GEN_INT (elt+4)));
	  break;

	case 2:
	  tmp = gen_reg_rtx (mode);
	  emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
	  break;

	default:
	  gcc_unreachable ();
	}
      vec = tmp;
      use_vec_extr = true;
      elt = 0;
      break;

    case V4SImode:
      use_vec_extr = TARGET_SSE4_1;
      if (use_vec_extr)
	break;

      if (TARGET_SSE2)
	{
	  switch (elt)
	    {
	    case 0:
	      tmp = vec;
	      break;

	    case 1:
	    case 3:
	      tmp = gen_reg_rtx (mode);
	      emit_insn (gen_sse2_pshufd_1 (tmp, vec,
					    GEN_INT (elt), GEN_INT (elt),
					    GEN_INT (elt), GEN_INT (elt)));
	      break;

	    case 2:
	      tmp = gen_reg_rtx (mode);
	      emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  vec = tmp;
	  use_vec_extr = true;
	  elt = 0;
	}
      else
	{
	  /* For SSE1, we have to reuse the V4SF code.  */
	  ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
				      gen_lowpart (V4SFmode, vec), elt);
	  return;
	}
      break;

    case V8HImode:
      use_vec_extr = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
      use_vec_extr = TARGET_SSE4_1;
      break;

    case V8SFmode:
      if (TARGET_AVX)
	{
	  tmp = gen_reg_rtx (V4SFmode);
	  if (elt < 4)
	    emit_insn (gen_vec_extract_lo_v8sf (tmp, vec));
	  else
	    emit_insn (gen_vec_extract_hi_v8sf (tmp, vec));
	  ix86_expand_vector_extract (false, target, tmp, elt & 3);
	  return;
	}
      break;

    case V4DFmode:
      if (TARGET_AVX)
	{
	  tmp = gen_reg_rtx (V2DFmode);
	  if (elt < 2)
	    emit_insn (gen_vec_extract_lo_v4df (tmp, vec));
	  else
	    emit_insn (gen_vec_extract_hi_v4df (tmp, vec));
	  ix86_expand_vector_extract (false, target, tmp, elt & 1);
	  return;
	}
      break;

    case V32QImode:
      if (TARGET_AVX)
	{
	  tmp = gen_reg_rtx (V16QImode);
	  if (elt < 16)
	    emit_insn (gen_vec_extract_lo_v32qi (tmp, vec));
	  else
	    emit_insn (gen_vec_extract_hi_v32qi (tmp, vec));
	  ix86_expand_vector_extract (false, target, tmp, elt & 15);
	  return;
	}
      break;

    case V16HImode:
      if (TARGET_AVX)
	{
	  tmp = gen_reg_rtx (V8HImode);
	  if (elt < 8)
	    emit_insn (gen_vec_extract_lo_v16hi (tmp, vec));
	  else
	    emit_insn (gen_vec_extract_hi_v16hi (tmp, vec));
	  ix86_expand_vector_extract (false, target, tmp, elt & 7);
	  return;
	}
      break;

    case V8SImode:
      if (TARGET_AVX)
	{
	  tmp = gen_reg_rtx (V4SImode);
	  if (elt < 4)
	    emit_insn (gen_vec_extract_lo_v8si (tmp, vec));
	  else
	    emit_insn (gen_vec_extract_hi_v8si (tmp, vec));
	  ix86_expand_vector_extract (false, target, tmp, elt & 3);
	  return;
	}
      break;

    case V4DImode:
      if (TARGET_AVX)
	{
	  tmp = gen_reg_rtx (V2DImode);
	  if (elt < 2)
	    emit_insn (gen_vec_extract_lo_v4di (tmp, vec));
	  else
	    emit_insn (gen_vec_extract_hi_v4di (tmp, vec));
	  ix86_expand_vector_extract (false, target, tmp, elt & 1);
	  return;
	}
      break;

    case V8QImode:
      /* ??? Could extract the appropriate HImode element and shift.  */
    default:
      break;
    }

  if (use_vec_extr)
    {
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
      tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);

      /* Let the rtl optimizers know about the zero extension performed.  */
      if (inner_mode == QImode || inner_mode == HImode)
	{
	  tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
	  target = gen_lowpart (SImode, target);
	}

      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

      emit_move_insn (mem, vec);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (target, tmp);
    }
}

/* Generate code to copy vector bits i / 2 ... i - 1 from vector SRC
   to bits 0 ... i / 2 - 1 of vector DEST, which has the same mode.
   The upper bits of DEST are undefined, though they shouldn't cause
   exceptions (some bits from src or all zeros are ok).  */

static void
emit_reduc_half (rtx dest, rtx src, int i)
{
  rtx tem;
  switch (GET_MODE (src))
    {
    case V4SFmode:
      if (i == 128)
	tem = gen_sse_movhlps (dest, src, src);
      else
	tem = gen_sse_shufps_v4sf (dest, src, src, const1_rtx, const1_rtx,
				   GEN_INT (1 + 4), GEN_INT (1 + 4));
      break;
    case V2DFmode:
      tem = gen_vec_interleave_highv2df (dest, src, src);
      break;
    case V16QImode:
    case V8HImode:
    case V4SImode:
    case V2DImode:
      tem = gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, dest),
				gen_lowpart (V1TImode, src),
				GEN_INT (i / 2));
      break;
    case V8SFmode:
      if (i == 256)
	tem = gen_avx_vperm2f128v8sf3 (dest, src, src, const1_rtx);
      else
	tem = gen_avx_shufps256 (dest, src, src,
				 GEN_INT (i == 128 ? 2 + (3 << 2) : 1));
      break;
    case V4DFmode:
      if (i == 256)
	tem = gen_avx_vperm2f128v4df3 (dest, src, src, const1_rtx);
      else
	tem = gen_avx_shufpd256 (dest, src, src, const1_rtx);
      break;
    case V32QImode:
    case V16HImode:
    case V8SImode:
    case V4DImode:
      if (i == 256)
	tem = gen_avx2_permv2ti (gen_lowpart (V4DImode, dest),
				 gen_lowpart (V4DImode, src),
				 gen_lowpart (V4DImode, src),
				 const1_rtx);
      else
	tem = gen_avx2_lshrv2ti3 (gen_lowpart (V2TImode, dest),
				  gen_lowpart (V2TImode, src),
				  GEN_INT (i / 2));
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (tem);
}

/* Expand a vector reduction.  FN is the binary pattern to reduce;
   DEST is the destination; IN is the input vector.  */

void
ix86_expand_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
{
  rtx half, dst, vec = in;
  enum machine_mode mode = GET_MODE (in);
  int i;

  /* SSE4 has a special instruction for V8HImode UMIN reduction.  */
  if (TARGET_SSE4_1
      && mode == V8HImode
      && fn == gen_uminv8hi3)
    {
      emit_insn (gen_sse4_1_phminposuw (dest, in));
      return;
    }

  for (i = GET_MODE_BITSIZE (mode);
       i > GET_MODE_BITSIZE (GET_MODE_INNER (mode));
       i >>= 1)
    {
      half = gen_reg_rtx (mode);
      emit_reduc_half (half, vec, i);
      if (i == GET_MODE_BITSIZE (GET_MODE_INNER (mode)) * 2)
	dst = dest;
      else
	dst = gen_reg_rtx (mode);
      emit_insn (fn (dst, half, vec));
      vec = dst;
    }
}
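
/* A minimal scalar C model of the halving reduction above (an
   illustrative sketch added for exposition; model_reduc is not part
   of GCC).  Each step combines the shifted-down high half with the
   low half, so a width-N reduction needs log2(N) applications of FN.  */
#if 0 /* reference model only */
static int
model_reduc (int (*fn) (int, int), int *vec, int n)
{
  int half;
  for (half = n / 2; half >= 1; half /= 2)
    {
      int i;
      /* Mirrors emit_reduc_half followed by emit_insn (fn (...)).  */
      for (i = 0; i < half; i++)
	vec[i] = fn (vec[i], vec[i + half]);
    }
  return vec[0];
}
#endif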

/* Target hook for scalar_mode_supported_p.  */
static bool
ix86_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (mode == TFmode)
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}

/* Implements target hook vector_mode_supported_p.  */
static bool
ix86_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
    return true;
  if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
    return true;
  if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
    return true;
  if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
    return true;
  if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
    return true;
  return false;
}

/* Target hook for c_mode_for_suffix.  */
static enum machine_mode
ix86_c_mode_for_suffix (char suffix)
{
  if (suffix == 'q')
    return TFmode;
  if (suffix == 'w')
    return XFmode;

  return VOIDmode;
}

/* Worker function for TARGET_MD_ASM_CLOBBERS.

   We do this in the new i386 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
		      tree inputs ATTRIBUTE_UNUSED,
		      tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
			clobbers);
  clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
			clobbers);
  return clobbers;
}

/* Implements target vector targetm.asm.encode_section_info.  */

static void ATTRIBUTE_UNUSED
ix86_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == VAR_DECL
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
      && ix86_in_large_data_p (decl))
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
}

/* Worker function for REVERSE_CONDITION.  */

enum rtx_code
ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
{
  return (mode != CCFPmode && mode != CCFPUmode
	  ? reverse_condition (code)
	  : reverse_condition_maybe_unordered (code));
}

/* Output code to perform an x87 FP register move, from OPERANDS[1]
   to OPERANDS[0].  */

const char *
output_387_reg_move (rtx insn, rtx *operands)
{
  if (REG_P (operands[0]))
    {
      if (REG_P (operands[1])
	  && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
	{
	  if (REGNO (operands[0]) == FIRST_STACK_REG)
	    return output_387_ffreep (operands, 0);
	  return "fstp\t%y0";
	}
      if (STACK_TOP_P (operands[0]))
	return "fld%Z1\t%y1";
      return "fst\t%y0";
    }
  else if (MEM_P (operands[0]))
    {
      gcc_assert (REG_P (operands[1]));
      if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
	return "fstp%Z0\t%y0";
      else
	{
	  /* There is no non-popping store to memory for XFmode.
	     So if we need one, follow the store with a load.  */
	  if (GET_MODE (operands[0]) == XFmode)
	    return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
	  else
	    return "fst%Z0\t%y0";
	}
    }
  else
    gcc_unreachable ();
}

/* Output code to perform a conditional jump to LABEL, if C2 flag in
   FP status register is set.  */

void
ix86_emit_fp_unordered_jump (rtx label)
{
  rtx reg = gen_reg_rtx (HImode);
  rtx temp;

  emit_insn (gen_x86_fnstsw_1 (reg));

  if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
    {
      emit_insn (gen_x86_sahf_1 (reg));

      temp = gen_rtx_REG (CCmode, FLAGS_REG);
      temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
    }
  else
    {
      emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));

      temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
    }

  temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
			       gen_rtx_LABEL_REF (VOIDmode, label),
			       pc_rtx);
  temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);

  emit_jump_insn (temp);
  predict_jump (REG_BR_PROB_BASE * 10 / 100);
}

/* Output code to perform a log1p XFmode calculation.  */

void ix86_emit_i387_log1p (rtx op0, rtx op1)
{
  rtx label1 = gen_label_rtx ();
  rtx label2 = gen_label_rtx ();

  rtx tmp = gen_reg_rtx (XFmode);
  rtx tmp2 = gen_reg_rtx (XFmode);
  rtx test;

  emit_insn (gen_absxf2 (tmp, op1));
  test = gen_rtx_GE (VOIDmode, tmp,
    CONST_DOUBLE_FROM_REAL_VALUE (
       REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
       XFmode));
  emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));

  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
  emit_jump (label2);

  emit_label (label1);
  emit_move_insn (tmp, CONST1_RTX (XFmode));
  emit_insn (gen_addxf3 (tmp, op1, tmp));
  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));

  emit_label (label2);
}
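
/* A scalar C sketch of the branch implemented above (illustrative
   only; model_log1p is not part of GCC).  fyl2xp1 is accurate only
   for small |x|, so inputs with |x| >= 1 - sqrt(2)/2 (~0.29289...)
   fall back to fyl2x on 1 + x; both paths multiply by ln(2) loaded
   via fldln2.  Assumes <math.h>.  */
#if 0 /* reference model only */
static long double
model_log1p (long double x)
{
  const long double ln2 = 0.6931471805599453094L;	/* fldln2 */
  if (fabsl (x) < 0.29289321881345247561810596348408353L)
    /* fyl2xp1 path: ln2 * log2 (x + 1), precise for small |x|.  */
    return ln2 * log2l (x + 1.0L);
  /* fyl2x path: compute tmp = 1 + x first, then ln2 * log2 (tmp).  */
  return ln2 * log2l (1.0L + x);
}
#endif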

/* Emit code for round calculation.  */
void ix86_emit_i387_round (rtx op0, rtx op1)
{
  enum machine_mode inmode = GET_MODE (op1);
  enum machine_mode outmode = GET_MODE (op0);
  rtx e1, e2, res, tmp, tmp1, half;
  rtx scratch = gen_reg_rtx (HImode);
  rtx flags = gen_rtx_REG (CCNOmode, FLAGS_REG);
  rtx jump_label = gen_label_rtx ();
  rtx insn;
  rtx (*gen_abs) (rtx, rtx);
  rtx (*gen_neg) (rtx, rtx);

  switch (inmode)
    {
    case SFmode:
      gen_abs = gen_abssf2;
      break;
    case DFmode:
      gen_abs = gen_absdf2;
      break;
    case XFmode:
      gen_abs = gen_absxf2;
      break;
    default:
      gcc_unreachable ();
    }

  switch (outmode)
    {
    case SFmode:
      gen_neg = gen_negsf2;
      break;
    case DFmode:
      gen_neg = gen_negdf2;
      break;
    case XFmode:
      gen_neg = gen_negxf2;
      break;
    case HImode:
      gen_neg = gen_neghi2;
      break;
    case SImode:
      gen_neg = gen_negsi2;
      break;
    case DImode:
      gen_neg = gen_negdi2;
      break;
    default:
      gcc_unreachable ();
    }

  e1 = gen_reg_rtx (inmode);
  e2 = gen_reg_rtx (inmode);
  res = gen_reg_rtx (outmode);

  half = CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, inmode);

  /* round(a) = sgn(a) * floor(fabs(a) + 0.5) */

  /* scratch = fxam(op1) */
  emit_insn (gen_rtx_SET (VOIDmode, scratch,
			  gen_rtx_UNSPEC (HImode, gen_rtvec (1, op1),
					  UNSPEC_FXAM)));
  /* e1 = fabs(op1) */
  emit_insn (gen_abs (e1, op1));

  /* e2 = e1 + 0.5 */
  half = force_reg (inmode, half);
  emit_insn (gen_rtx_SET (VOIDmode, e2,
			  gen_rtx_PLUS (inmode, e1, half)));

  /* res = floor(e2) */
  if (inmode != XFmode)
    {
      tmp1 = gen_reg_rtx (XFmode);

      emit_insn (gen_rtx_SET (VOIDmode, tmp1,
			      gen_rtx_FLOAT_EXTEND (XFmode, e2)));
    }
  else
    tmp1 = e2;

  switch (outmode)
    {
    case SFmode:
    case DFmode:
      {
	rtx tmp0 = gen_reg_rtx (XFmode);

	emit_insn (gen_frndintxf2_floor (tmp0, tmp1));

	emit_insn (gen_rtx_SET (VOIDmode, res,
				gen_rtx_UNSPEC (outmode, gen_rtvec (1, tmp0),
						UNSPEC_TRUNC_NOOP)));
      }
      break;
    case XFmode:
      emit_insn (gen_frndintxf2_floor (res, tmp1));
      break;
    case HImode:
      emit_insn (gen_lfloorxfhi2 (res, tmp1));
      break;
    case SImode:
      emit_insn (gen_lfloorxfsi2 (res, tmp1));
      break;
    case DImode:
      emit_insn (gen_lfloorxfdi2 (res, tmp1));
      break;
    default:
      gcc_unreachable ();
    }

  /* flags = signbit(a) */
  emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x02)));

  /* if (flags) then res = -res */
  tmp = gen_rtx_IF_THEN_ELSE (VOIDmode,
			      gen_rtx_EQ (VOIDmode, flags, const0_rtx),
			      gen_rtx_LABEL_REF (VOIDmode, jump_label),
			      pc_rtx);
  insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
  predict_jump (REG_BR_PROB_BASE * 50 / 100);
  JUMP_LABEL (insn) = jump_label;

  emit_insn (gen_neg (res, res));

  emit_label (jump_label);
  LABEL_NUSES (jump_label) = 1;

  emit_move_insn (op0, res);
}
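
/* Scalar C model of the sequence above (illustrative sketch;
   model_round is not part of GCC): round to nearest with halfway
   cases away from zero, computed as sign(a) * floor(fabs(a) + 0.5).
   Assumes <math.h>.  */
#if 0 /* reference model only */
static double
model_round (double a)
{
  double r = floor (fabs (a) + 0.5);	/* e2 = fabs(a) + 0.5; res = floor(e2) */
  return signbit (a) ? -r : r;		/* fxam sign bit selects the negation */
}
#endif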

/* Output code to perform a Newton-Raphson approximation of a single precision
   floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm].  */

void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
{
  rtx x0, x1, e0, e1;

  x0 = gen_reg_rtx (mode);
  e0 = gen_reg_rtx (mode);
  e1 = gen_reg_rtx (mode);
  x1 = gen_reg_rtx (mode);

  /* a / b = a * ((rcp(b) + rcp(b)) - (b * rcp(b) * rcp (b))) */

  b = force_reg (mode, b);

  /* x0 = rcp(b) estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
					  UNSPEC_RCP)));
  /* e0 = x0 * b */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
			  gen_rtx_MULT (mode, x0, b)));

  /* e0 = x0 * e0 */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
			  gen_rtx_MULT (mode, x0, e0)));

  /* e1 = x0 + x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
			  gen_rtx_PLUS (mode, x0, x0)));

  /* x1 = e1 - e0 */
  emit_insn (gen_rtx_SET (VOIDmode, x1,
			  gen_rtx_MINUS (mode, e1, e0)));

  /* res = a * x1 */
  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_MULT (mode, a, x1)));
}
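
/* Scalar C model of the Newton-Raphson step above (illustrative
   sketch; model_swdiv is not part of GCC).  rcp_b stands for the
   hardware rcpss estimate (about 12 bits); one iteration roughly
   doubles the precision: x1 = x0*(2 - b*x0) = (x0 + x0) - b*x0*x0.  */
#if 0 /* reference model only */
static float
model_swdiv (float a, float b, float rcp_b /* hardware rcpss(b) estimate */)
{
  float x0 = rcp_b;
  float e0 = x0 * b;		/* b * rcp(b) */
  e0 = x0 * e0;			/* b * rcp(b) * rcp(b) */
  float e1 = x0 + x0;		/* rcp(b) + rcp(b) */
  float x1 = e1 - e0;		/* refined reciprocal */
  return a * x1;
}
#endif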

/* Output code to perform a Newton-Raphson approximation of a
   single precision floating point [reciprocal] square root.  */

void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
			 bool recip)
{
  rtx x0, e0, e1, e2, e3, mthree, mhalf;
  REAL_VALUE_TYPE r;

  x0 = gen_reg_rtx (mode);
  e0 = gen_reg_rtx (mode);
  e1 = gen_reg_rtx (mode);
  e2 = gen_reg_rtx (mode);
  e3 = gen_reg_rtx (mode);

  real_from_integer (&r, VOIDmode, -3, -1, 0);
  mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);

  real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
  mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);

  if (VECTOR_MODE_P (mode))
    {
      mthree = ix86_build_const_vector (mode, true, mthree);
      mhalf = ix86_build_const_vector (mode, true, mhalf);
    }

  /* sqrt(a)  = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
     rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */

  a = force_reg (mode, a);

  /* x0 = rsqrt(a) estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
					  UNSPEC_RSQRT)));

  /* If (a == 0.0) Filter out infinity to prevent NaN for sqrt(0.0).  */
  if (!recip)
    {
      rtx zero, mask;

      zero = gen_reg_rtx (mode);
      mask = gen_reg_rtx (mode);

      zero = force_reg (mode, CONST0_RTX(mode));
      emit_insn (gen_rtx_SET (VOIDmode, mask,
			      gen_rtx_NE (mode, zero, a)));

      emit_insn (gen_rtx_SET (VOIDmode, x0,
			      gen_rtx_AND (mode, x0, mask)));
    }

  /* e0 = x0 * a */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
			  gen_rtx_MULT (mode, x0, a)));
  /* e1 = e0 * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
			  gen_rtx_MULT (mode, e0, x0)));

  /* e2 = e1 - 3. */
  mthree = force_reg (mode, mthree);
  emit_insn (gen_rtx_SET (VOIDmode, e2,
			  gen_rtx_PLUS (mode, e1, mthree)));

  mhalf = force_reg (mode, mhalf);
  if (recip)
    /* e3 = -.5 * x0 */
    emit_insn (gen_rtx_SET (VOIDmode, e3,
			    gen_rtx_MULT (mode, x0, mhalf)));
  else
    /* e3 = -.5 * e0 */
    emit_insn (gen_rtx_SET (VOIDmode, e3,
			    gen_rtx_MULT (mode, e0, mhalf)));
  /* ret = e2 * e3 */
  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_MULT (mode, e2, e3)));
}
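
/* Scalar C model of the refinement above (illustrative sketch;
   model_swsqrt is not part of GCC).  rsqrt0 stands for the hardware
   rsqrtss estimate; the update is the classic x*(3 - a*x*x)/2,
   written here with -0.5 and (e1 - 3) exactly as emitted above.  */
#if 0 /* reference model only */
static float
model_swsqrt (float a, float rsqrt0 /* hardware rsqrtss(a) estimate */,
	      int recip /* nonzero: return 1/sqrt(a) instead of sqrt(a) */)
{
  float x0 = rsqrt0;
  float e0 = x0 * a;			/* a * rsqrt(a) */
  float e1 = e0 * x0;			/* a * rsqrt(a) * rsqrt(a) */
  float e2 = e1 - 3.0f;
  float e3 = recip ? -0.5f * x0 : -0.5f * e0;
  return e2 * e3;			/* refined rsqrt(a) or sqrt(a) */
}
#endif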

#ifdef TARGET_SOLARIS
/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */

static void
i386_solaris_elf_named_section (const char *name, unsigned int flags,
				tree decl)
{
  /* With Binutils 2.15, the "@unwind" marker must be specified on
     every occurrence of the ".eh_frame" section, not just the first
     one.  */
  if (TARGET_64BIT
      && strcmp (name, ".eh_frame") == 0)
    {
      fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
	       flags & SECTION_WRITE ? "aw" : "a");
      return;
    }

  if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
    {
      solaris_elf_asm_comdat_section (name, flags, decl);
      return;
    }

  default_elf_asm_named_section (name, flags, decl);
}
#endif /* TARGET_SOLARIS */

/* Return the mangling of TYPE if it is an extended fundamental type.  */

static const char *
ix86_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  switch (TYPE_MODE (type))
    {
    case TFmode:
      /* __float128 is "g".  */
      return "g";
    case XFmode:
      /* "long double" or __float80 is "e".  */
      return "e";
    default:
      return NULL;
    }
}

/* For 32-bit code we can save PIC register setup by using
   __stack_chk_fail_local hidden function instead of calling
   __stack_chk_fail directly.  64-bit code doesn't need to setup any PIC
   register, so it is better to call __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
ix86_stack_protect_fail (void)
{
  return TARGET_64BIT
	 ? default_external_stack_protect_fail ()
	 : default_hidden_stack_protect_fail ();
}

/* Select a format to encode pointers in exception handling data.  CODE
   is 0 for data, 1 for code labels, 2 for function pointers.  GLOBAL is
   true if the symbol may be affected by dynamic relocations.

   ??? All x86 object file formats are capable of representing this.
   After all, the relocation needed is the same as for the call insn.
   Whether or not a particular assembler allows us to enter such, I
   guess we'll have to see.  */
int
asm_preferred_eh_data_format (int code, int global)
{
  if (flag_pic)
    {
      int type = DW_EH_PE_sdata8;
      if (!TARGET_64BIT
	  || ix86_cmodel == CM_SMALL_PIC
	  || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
	type = DW_EH_PE_sdata4;
      return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
    }
  if (ix86_cmodel == CM_SMALL
      || (ix86_cmodel == CM_MEDIUM && code))
    return DW_EH_PE_udata4;
  return DW_EH_PE_absptr;
}

/* Expand copysign from SIGN to the positive value ABS_VALUE
   storing in RESULT.  If MASK is non-null, it shall be a mask to mask out
   the sign of SIGN.  */
static void
ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
{
  enum machine_mode mode = GET_MODE (sign);
  rtx sgn = gen_reg_rtx (mode);
  if (mask == NULL_RTX)
    {
      enum machine_mode vmode;

      if (mode == SFmode)
	vmode = V4SFmode;
      else if (mode == DFmode)
	vmode = V2DFmode;
      else
	vmode = mode;

      mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
      if (!VECTOR_MODE_P (mode))
	{
	  /* We need to generate a scalar mode mask in this case.  */
	  rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
	  tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
	  mask = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
	}
    }
  else
    mask = gen_rtx_NOT (mode, mask);

  emit_insn (gen_rtx_SET (VOIDmode, sgn,
			  gen_rtx_AND (mode, mask, sign)));
  emit_insn (gen_rtx_SET (VOIDmode, result,
			  gen_rtx_IOR (mode, abs_value, sgn)));
}

/* Expand fabs (OP0) and return a new rtx that holds the result.  The
   mask for masking out the sign-bit is stored in *SMASK, if that is
   non-null.  */
static rtx
ix86_expand_sse_fabs (rtx op0, rtx *smask)
{
  enum machine_mode vmode, mode = GET_MODE (op0);
  rtx xa, mask;

  xa = gen_reg_rtx (mode);
  if (mode == SFmode)
    vmode = V4SFmode;
  else if (mode == DFmode)
    vmode = V2DFmode;
  else
    vmode = mode;
  mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
  if (!VECTOR_MODE_P (mode))
    {
      /* We need to generate a scalar mode mask in this case.  */
      rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
      tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
    }
  emit_insn (gen_rtx_SET (VOIDmode, xa,
			  gen_rtx_AND (mode, op0, mask)));

  if (smask)
    *smask = mask;

  return xa;
}

/* Expands a comparison of OP0 with OP1 using comparison code CODE,
   swapping the operands if SWAP_OPERANDS is true.  The expanded
   code is a forward jump to a newly created label in case the
   comparison is true.  The generated label rtx is returned.  */
static rtx
ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
				  bool swap_operands)
{
  rtx label, tmp;

  if (swap_operands)
    {
      tmp = op0;
      op0 = op1;
      op1 = tmp;
    }

  label = gen_label_rtx ();
  tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
			  gen_rtx_COMPARE (CCFPUmode, op0, op1)));
  tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
  tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
			      gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
  tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
  JUMP_LABEL (tmp) = label;

  return label;
}

/* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
   using comparison code CODE.  Operands are swapped for the comparison if
   SWAP_OPERANDS is true.  Returns a rtx for the generated mask.  */
static rtx
ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
			      bool swap_operands)
{
  rtx (*insn)(rtx, rtx, rtx, rtx);
  enum machine_mode mode = GET_MODE (op0);
  rtx mask = gen_reg_rtx (mode);

  if (swap_operands)
    {
      rtx tmp = op0;
      op0 = op1;
      op1 = tmp;
    }

  insn = mode == DFmode ? gen_setcc_df_sse : gen_setcc_sf_sse;

  emit_insn (insn (mask, op0, op1,
		   gen_rtx_fmt_ee (code, mode, op0, op1)));
  return mask;
}

/* Generate and return a rtx of mode MODE for 2**n where n is the number
   of bits of the mantissa of MODE, which must be one of DFmode or SFmode.  */
static rtx
ix86_gen_TWO52 (enum machine_mode mode)
{
  REAL_VALUE_TYPE TWO52r;
  rtx TWO52;

  real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
  TWO52 = const_double_from_real_value (TWO52r, mode);
  TWO52 = force_reg (mode, TWO52);

  return TWO52;
}

/* Expand SSE sequence for computing lround from OP1 storing
   into OP0.  */
void
ix86_expand_lround (rtx op0, rtx op1)
{
  /* C code for the stuff we're doing below:
	tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
	op0 = (long)tmp;
   */
  enum machine_mode mode = GET_MODE (op1);
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;
  rtx adj;

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);

  /* adj = copysign (0.5, op1) */
  adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
  ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);

  /* adj = op1 + adj */
  adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);

  /* op0 = (imode)adj */
  expand_fix (op0, adj, 0);
}
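
/* Scalar C model of the lround expansion above (illustrative sketch;
   model_lround is not part of GCC).  Using nextafter (0.5, 0.0)
   instead of 0.5 keeps inputs just below one half from rounding up
   after the add.  Assumes <math.h>.  */
#if 0 /* reference model only */
static long
model_lround (double op1)
{
  double adj = copysign (nextafter (0.5, 0.0), op1);
  return (long) (op1 + adj);	/* the conversion truncates */
}
#endif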

/* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1
   storing into OPERAND0.  */
void
ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
{
  /* C code for the stuff we're doing below (for do_floor):
	xi = (long)op1;
	xi -= (double)xi > op1 ? 1 : 0;
	op0 = xi;
   */
  enum machine_mode fmode = GET_MODE (op1);
  enum machine_mode imode = GET_MODE (op0);
  rtx ireg, freg, label, tmp;

  /* reg = (long)op1 */
  ireg = gen_reg_rtx (imode);
  expand_fix (ireg, op1, 0);

  /* freg = (double)reg */
  freg = gen_reg_rtx (fmode);
  expand_float (freg, ireg, 0);

  /* ireg = (freg > op1) ? ireg - 1 : ireg */
  label = ix86_expand_sse_compare_and_jump (UNLE,
					    freg, op1, !do_floor);
  tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
			     ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (ireg, tmp);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (op0, ireg);
}
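
/* Scalar C model for the floor case above (illustrative sketch;
   model_lfloor is not part of GCC): convert, then subtract one
   whenever the truncation landed above the input, which happens only
   for negative non-integers.  The ceil variant adds one when the
   truncation landed below.  */
#if 0 /* reference model only */
static long
model_lfloor (double op1)
{
  long xi = (long) op1;		/* truncates toward zero */
  if ((double) xi > op1)	/* negative non-integer input */
    xi -= 1;
  return xi;
}
#endif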

/* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
   result in OPERAND0.  */
void
ix86_expand_rint (rtx operand0, rtx operand1)
{
  /* C code for the stuff we're doing below:
	xa = fabs (operand1);
	if (!isless (xa, 2**52))
	  return operand1;
	xa = xa + 2**52 - 2**52;
	return copysign (xa, operand1);
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx res, xa, label, TWO52, mask;

  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  TWO52 = ix86_gen_TWO52 (mode);
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);

  ix86_sse_copysign_to_positive (res, xa, res, mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
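
/* Scalar C model of the TWO52 trick used above (illustrative sketch;
   model_rint is not part of GCC): adding and subtracting 2**52 forces
   a double to be rounded to an integer in the current rounding mode,
   and any |x| >= 2**52 is already integral so it passes through.
   Assumes <math.h>.  */
#if 0 /* reference model only */
static double
model_rint (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  double xa = fabs (x);
  if (!(xa < TWO52))		/* !isless: also catches NaN */
    return x;
  xa = xa + TWO52 - TWO52;	/* rounds in the current FP mode */
  return copysign (xa, x);	/* restores the sign, -0.0 included */
}
#endif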

/* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
{
  /* C code for the stuff we expand below.
	double xa = fabs (x), x2;
	if (!isless (xa, TWO52))
	  return x;
	xa = xa + TWO52 - TWO52;
	x2 = copysign (xa, x);
     Compensate.  Floor:
	if (x2 > x)
	  x2 -= 1;
     Compensate.  Ceil:
	if (x2 < x)
	  x2 += 1;
	return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, TWO52, tmp, label, one, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* xa = xa + TWO52 - TWO52; */
  xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);

  /* xa = copysign (xa, operand1) */
  ix86_sse_copysign_to_positive (xa, xa, res, mask);

  /* generate 1.0 or -1.0 */
  one = force_reg (mode,
		   const_double_from_real_value (do_floor
						 ? dconst1 : dconstm1, mode));

  /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
			  gen_rtx_AND (mode, one, tmp)));
  /* We always need to subtract here to preserve signed zero.  */
  tmp = expand_simple_binop (mode, MINUS,
			     xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
{
  /* C code for the stuff we expand below.
	double xa = fabs (x), x2;
	if (!isless (xa, TWO52))
	  return x;
	x2 = (double)(long)x;
     Compensate.  Floor:
	if (x2 > x)
	  x2 -= 1;
     Compensate.  Ceil:
	if (x2 < x)
	  x2 += 1;
	if (HONOR_SIGNED_ZEROS (mode))
	  return copysign (x2, x);
	return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xi, TWO52, tmp, label, one, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* xa = (double)(long)x */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, res, 0);
  expand_float (xa, xi, 0);

  /* generate 1.0 */
  one = force_reg (mode, const_double_from_real_value (dconst1, mode));

  /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
			  gen_rtx_AND (mode, one, tmp)));
  tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
			     xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  if (HONOR_SIGNED_ZEROS (mode))
    ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  Sequence that works without relying on DImode truncation
   via cvttsd2siq that is only available on 64bit targets.  */
void
ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
{
  /* C code for the stuff we expand below.
	double xa = fabs (x), xa2, x2;
	if (!isless (xa, TWO52))
	  return x;
     Using the absolute value and copying back sign makes
     -0.0 -> -0.0 correct.
	xa2 = xa + TWO52 - TWO52;
     Compensate.
	dxa = xa2 - xa;
	if (dxa <= -0.5)
	  xa2 += 1;
	else if (dxa > 0.5)
	  xa2 -= 1;
	x2 = copysign (xa2, x);
	return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* xa2 = xa + TWO52 - TWO52; */
  xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);

  /* dxa = xa2 - xa; */
  dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);

  /* generate 0.5, 1.0 and -0.5 */
  half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
  one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
  mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
			       0, OPTAB_DIRECT);

  /* Compensate.  */
  tmp = gen_reg_rtx (mode);
  /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
			  gen_rtx_AND (mode, one, tmp)));
  xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
  /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
			  gen_rtx_AND (mode, one, tmp)));
  xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);

  /* res = copysign (xa2, operand1) */
  ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
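
/* Scalar C model of the compensation above (illustrative sketch;
   model_rounddf_32 is not part of GCC).  xa2 is xa rounded by the
   TWO52 trick in round-to-nearest-even; nudging it by 1 whenever it
   moved by half or more converts nearest-even into the desired
   half-away-from-zero rounding without any DImode conversion.  */
#if 0 /* reference model only; assumes <math.h> */
static double
model_rounddf_32 (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  double xa = fabs (x);
  if (!(xa < TWO52))
    return x;
  double xa2 = xa + TWO52 - TWO52;	/* nearest-even rounding */
  double dxa = xa2 - xa;
  if (dxa > 0.5)
    xa2 -= 1.0;
  else if (dxa <= -0.5)
    xa2 += 1.0;
  return copysign (xa2, x);
}
#endif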

/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_trunc (rtx operand0, rtx operand1)
{
  /* C code for SSE variant we expand below.
	double xa = fabs (x), x2;
	if (!isless (xa, TWO52))
	  return x;
	x2 = (double)(long)x;
	if (HONOR_SIGNED_ZEROS (mode))
	  return copysign (x2, x);
	return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xi, TWO52, label, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* x = (double)(long)x */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, res, 0);
  expand_float (res, xi, 0);

  if (HONOR_SIGNED_ZEROS (mode))
    ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
{
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, mask, TWO52, label, one, res, smask, tmp;

  /* C code for SSE variant we expand below.
	double xa = fabs (x), x2;
	if (!isless (xa, TWO52))
	  return x;
	xa2 = xa + TWO52 - TWO52;
     Compensate:
	if (xa2 > xa)
	  xa2 -= 1.0;
	x2 = copysign (xa2, x);
	return x2;
   */

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &smask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* res = xa + TWO52 - TWO52; */
  tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  /* generate 1.0 */
  one = force_reg (mode, const_double_from_real_value (dconst1, mode));

  /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
  mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
  emit_insn (gen_rtx_SET (VOIDmode, mask,
			  gen_rtx_AND (mode, mask, one)));
  tmp = expand_simple_binop (mode, MINUS,
			     res, mask, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  /* res = copysign (res, operand1) */
  ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_round (rtx operand0, rtx operand1)
{
  /* C code for the stuff we're doing below:
	double xa = fabs (x);
	if (!isless (xa, TWO52))
	  return x;
	xa = (double)(long)(xa + nextafter (0.5, 0.0));
	return copysign (xa, x);
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx res, TWO52, xa, label, xi, half, mask;
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  TWO52 = ix86_gen_TWO52 (mode);
  xa = ix86_expand_sse_fabs (res, &mask);
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);

  /* xa = xa + 0.5 */
  half = force_reg (mode, const_double_from_real_value (pred_half, mode));
  xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);

  /* xa = (double)(int64_t)xa */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, xa, 0);
  expand_float (xa, xi, 0);

  /* res = copysign (xa, operand1) */
  ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE sequence for computing round
   from OP1 storing into OP0 using sse4 round insn.  */
void
ix86_expand_round_sse4 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx e1, e2, res, half;
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;
  rtx (*gen_copysign) (rtx, rtx, rtx);
  rtx (*gen_round) (rtx, rtx, rtx);

  switch (mode)
    {
    case SFmode:
      gen_copysign = gen_copysignsf3;
      gen_round = gen_sse4_1_roundsf2;
      break;
    case DFmode:
      gen_copysign = gen_copysigndf3;
      gen_round = gen_sse4_1_rounddf2;
      break;
    default:
      gcc_unreachable ();
    }

  /* round (a) = trunc (a + copysign (0.5, a)) */

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
  half = const_double_from_real_value (pred_half, mode);

  /* e1 = copysign (0.5, op1) */
  e1 = gen_reg_rtx (mode);
  emit_insn (gen_copysign (e1, half, op1));

  /* e2 = op1 + e1 */
  e2 = expand_simple_binop (mode, PLUS, op1, e1, NULL_RTX, 0, OPTAB_DIRECT);

  /* res = trunc (e2) */
  res = gen_reg_rtx (mode);
  emit_insn (gen_round (res, e2, GEN_INT (ROUND_TRUNC)));

  emit_move_insn (op0, res);
}
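
/* Scalar C model of the SSE4.1 sequence above (illustrative sketch;
   model_round_sse4 is not part of GCC): trunc (a + copysign (0.5 -
   one ulp, a)), where roundsd with ROUND_TRUNC supplies the trunc.
   Assumes <math.h>.  */
#if 0 /* reference model only */
static double
model_round_sse4 (double a)
{
  double pred_half = nextafter (0.5, 0.0);	/* 0.5 minus one ulp */
  double e1 = copysign (pred_half, a);
  return trunc (a + e1);
}
#endif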

/* Table of valid machine attributes.  */
static const struct attribute_spec ix86_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  /* Stdcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "stdcall",   0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Fastcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "fastcall",  0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Thiscall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "thiscall",  0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Cdecl attribute says the callee is a normal C declaration */
  { "cdecl",     0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Regparm attribute specifies how many integer arguments are to be
     passed in registers.  */
  { "regparm",   1, 1, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Sseregparm attribute says we are using x86_64 calling conventions
     for FP arguments.  */
  { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
    true },
  /* The transactional memory builtins are implicitly regparm or fastcall
     depending on the ABI.  Override the generic do-nothing attribute that
     these builtins were declared with.  */
  { "*tm regparm", 0, 0, false, true, true, ix86_handle_tm_regparm_attribute,
    true },
  /* force_align_arg_pointer says this function realigns the stack at entry.  */
  { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
    false, true,  true, ix86_handle_cconv_attribute, false },
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
  { "dllimport", 0, 0, false, false, false, handle_dll_attribute, false },
  { "dllexport", 0, 0, false, false, false, handle_dll_attribute, false },
  { "shared",    0, 0, true,  false, false, ix86_handle_shared_attribute,
    false },
#endif
  { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
    true },
  { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
    true },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  /* ms_abi and sysv_abi calling convention function attributes.  */
  { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
  { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
  { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute,
    false },
  { "callee_pop_aggregate_return", 1, 1, false, true, true,
    ix86_handle_callee_pop_aggregate_return, true },
  /* End element.  */
  { NULL, 0, 0, false, false, false, NULL, false }
};

/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				 tree vectype,
				 int misalign ATTRIBUTE_UNUSED)
{
  unsigned elements;

  switch (type_of_cost)
    {
      case scalar_stmt:
	return ix86_cost->scalar_stmt_cost;

      case scalar_load:
	return ix86_cost->scalar_load_cost;

      case scalar_store:
	return ix86_cost->scalar_store_cost;

      case vector_stmt:
	return ix86_cost->vec_stmt_cost;

      case vector_load:
	return ix86_cost->vec_align_load_cost;

      case vector_store:
	return ix86_cost->vec_store_cost;

      case vec_to_scalar:
	return ix86_cost->vec_to_scalar_cost;

      case scalar_to_vec:
	return ix86_cost->scalar_to_vec_cost;

      case unaligned_load:
      case unaligned_store:
	return ix86_cost->vec_unalign_load_cost;

      case cond_branch_taken:
	return ix86_cost->cond_taken_branch_cost;

      case cond_branch_not_taken:
	return ix86_cost->cond_not_taken_branch_cost;

      case vec_promote_demote:
	return ix86_cost->vec_stmt_cost;

      case vec_construct:
	elements = TYPE_VECTOR_SUBPARTS (vectype);
	return elements / 2 + 1;

      default:
	gcc_unreachable ();
    }
}

/* A cached (set (nil) (vselect (vconcat (nil) (nil)) (parallel [])))
   insn, so that expand_vselect{,_vconcat} doesn't have to create a fresh
   insn every time.  */

static GTY(()) rtx vselect_insn;

/* Initialize vselect_insn.  */

static void
init_vselect_insn (void)
{
  unsigned i;
  rtx x;

  x = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (MAX_VECT_LEN));
  for (i = 0; i < MAX_VECT_LEN; ++i)
    XVECEXP (x, 0, i) = const0_rtx;
  x = gen_rtx_VEC_SELECT (V2DFmode, gen_rtx_VEC_CONCAT (V4DFmode, const0_rtx,
							const0_rtx), x);
  x = gen_rtx_SET (VOIDmode, const0_rtx, x);
  start_sequence ();
  vselect_insn = emit_insn (x);
  end_sequence ();
}

/* Construct (set target (vec_select op0 (parallel perm))) and
   return true if that's a valid instruction in the active ISA.  */

static bool
expand_vselect (rtx target, rtx op0, const unsigned char *perm,
		unsigned nelt, bool testing_p)
{
  unsigned int i;
  rtx x, save_vconcat;
  int icode;

  if (vselect_insn == NULL_RTX)
    init_vselect_insn ();

  x = XEXP (SET_SRC (PATTERN (vselect_insn)), 1);
  PUT_NUM_ELEM (XVEC (x, 0), nelt);
  for (i = 0; i < nelt; ++i)
    XVECEXP (x, 0, i) = GEN_INT (perm[i]);
  save_vconcat = XEXP (SET_SRC (PATTERN (vselect_insn)), 0);
  XEXP (SET_SRC (PATTERN (vselect_insn)), 0) = op0;
  PUT_MODE (SET_SRC (PATTERN (vselect_insn)), GET_MODE (target));
  SET_DEST (PATTERN (vselect_insn)) = target;
  icode = recog_memoized (vselect_insn);

  if (icode >= 0 && !testing_p)
    emit_insn (copy_rtx (PATTERN (vselect_insn)));

  SET_DEST (PATTERN (vselect_insn)) = const0_rtx;
  XEXP (SET_SRC (PATTERN (vselect_insn)), 0) = save_vconcat;
  INSN_CODE (vselect_insn) = -1;

  return icode >= 0;
}

/* Similar, but generate a vec_concat from op0 and op1 as well.  */

static bool
expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
			const unsigned char *perm, unsigned nelt,
			bool testing_p)
{
  enum machine_mode v2mode;
  rtx x;
  bool ok;

  if (vselect_insn == NULL_RTX)
    init_vselect_insn ();

  v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
  x = XEXP (SET_SRC (PATTERN (vselect_insn)), 0);
  PUT_MODE (x, v2mode);
  XEXP (x, 0) = op0;
  XEXP (x, 1) = op1;
  ok = expand_vselect (target, x, perm, nelt, testing_p);
  XEXP (x, 0) = const0_rtx;
  XEXP (x, 1) = const0_rtx;
  return ok;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of blendp[sd] / pblendw / pblendvb / vpblendd.  */

static bool
expand_vec_perm_blend (struct expand_vec_perm_d *d)
{
  enum machine_mode vmode = d->vmode;
  unsigned i, mask, nelt = d->nelt;
  rtx target, op0, op1, x;
  rtx rperm[32], vperm;

  if (d->one_operand_p)
    return false;
  if (TARGET_AVX2 && GET_MODE_SIZE (vmode) == 32)
    ;
  else if (TARGET_AVX && (vmode == V4DFmode || vmode == V8SFmode))
    ;
  else if (TARGET_SSE4_1 && GET_MODE_SIZE (vmode) == 16)
    ;
  else
    return false;

  /* This is a blend, not a permute.  Elements must stay in their
     respective lanes.  */
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = d->perm[i];
      if (!(e == i || e == i + nelt))
        return false;
    }

  if (d->testing_p)
    return true;

  /* ??? Without SSE4.1, we could implement this with and/andn/or.  This
     decision should be extracted elsewhere, so that we only try that
     sequence once all budget==3 options have been tried.  */
  target = d->target;
  op0 = d->op0;
  op1 = d->op1;
  mask = 0;

  switch (vmode)
    {
    case V4DFmode:
    case V8SFmode:
    case V2DFmode:
    case V4SFmode:
    case V8HImode:
    case V8SImode:
      for (i = 0; i < nelt; ++i)
        mask |= (d->perm[i] >= nelt) << i;
      break;

    case V2DImode:
      for (i = 0; i < 2; ++i)
        mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
      vmode = V8HImode;
      goto do_subreg;

    case V4SImode:
      for (i = 0; i < 4; ++i)
        mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
      vmode = V8HImode;
      goto do_subreg;

    case V16QImode:
      /* See if bytes move in pairs so we can use pblendw with
         an immediate argument, rather than pblendvb with a vector
         argument.  */
      for (i = 0; i < 16; i += 2)
        if (d->perm[i] + 1 != d->perm[i + 1])
          {
          use_pblendvb:
            for (i = 0; i < nelt; ++i)
              rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);

          finish_pblendvb:
            vperm = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
            vperm = force_reg (vmode, vperm);

            if (GET_MODE_SIZE (vmode) == 16)
              emit_insn (gen_sse4_1_pblendvb (target, op0, op1, vperm));
            else
              emit_insn (gen_avx2_pblendvb (target, op0, op1, vperm));
            return true;
          }

      for (i = 0; i < 8; ++i)
        mask |= (d->perm[i * 2] >= 16) << i;
      vmode = V8HImode;
      /* FALLTHRU */

    do_subreg:
      target = gen_lowpart (vmode, target);
      op0 = gen_lowpart (vmode, op0);
      op1 = gen_lowpart (vmode, op1);
      break;

    case V32QImode:
      /* See if bytes move in pairs.  If not, vpblendvb must be used.  */
      for (i = 0; i < 32; i += 2)
        if (d->perm[i] + 1 != d->perm[i + 1])
          goto use_pblendvb;
      /* See if bytes move in quadruplets.  If yes, vpblendd
         with immediate can be used.  */
      for (i = 0; i < 32; i += 4)
        if (d->perm[i] + 2 != d->perm[i + 2])
          break;
      if (i < 32)
        {
          /* See if bytes move the same in both lanes.  If yes,
             vpblendw with immediate can be used.  */
          for (i = 0; i < 16; i += 2)
            if (d->perm[i] + 16 != d->perm[i + 16])
              goto use_pblendvb;

          /* Use vpblendw.  */
          for (i = 0; i < 16; ++i)
            mask |= (d->perm[i * 2] >= 32) << i;
          vmode = V16HImode;
          goto do_subreg;
        }

      /* Use vpblendd.  */
      for (i = 0; i < 8; ++i)
        mask |= (d->perm[i * 4] >= 32) << i;
      vmode = V8SImode;
      goto do_subreg;

    case V16HImode:
      /* See if words move in pairs.  If yes, vpblendd can be used.  */
      for (i = 0; i < 16; i += 2)
        if (d->perm[i] + 1 != d->perm[i + 1])
          break;
      if (i < 16)
        {
          /* See if words move the same in both lanes.  If not,
             vpblendvb must be used.  */
          for (i = 0; i < 8; i++)
            if (d->perm[i] + 8 != d->perm[i + 8])
              {
                /* Use vpblendvb.  */
                for (i = 0; i < 32; ++i)
                  rperm[i]
                    = (d->perm[i / 2] < 16 ? const0_rtx : constm1_rtx);

                vmode = V32QImode;
                nelt = 32;
                target = gen_lowpart (vmode, target);
                op0 = gen_lowpart (vmode, op0);
                op1 = gen_lowpart (vmode, op1);
                goto finish_pblendvb;
              }

          /* Use vpblendw.  */
          for (i = 0; i < 16; ++i)
            mask |= (d->perm[i] >= 16) << i;
          break;
        }

      /* Use vpblendd.  */
      for (i = 0; i < 8; ++i)
        mask |= (d->perm[i * 2] >= 16) << i;
      vmode = V8SImode;
      goto do_subreg;

    case V4DImode:
      /* Use vpblendd.  */
      for (i = 0; i < 4; ++i)
        mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
      vmode = V8SImode;
      goto do_subreg;

    default:
      gcc_unreachable ();
    }

  /* This matches five different patterns with the different modes.  */
  x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
  x = gen_rtx_SET (VOIDmode, target, x);
  emit_insn (x);

  return true;
}
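
/* For instance, a two-operand V8HImode blend with
   d->perm = { 0, 9, 2, 11, 4, 13, 6, 15 } passes the lane check above
   (each element is either i or i + 8) and builds mask = 0xaa
   (bits 1, 3, 5 and 7 set), i.e. a pblendw immediate that takes the
   odd words from op1 and the even words from op0.  */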
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of the variable form of vpermilps.

   Note that we will have already failed the immediate input vpermilps,
   which requires that the high and low part shuffle be identical; the
   variable form doesn't require that.  */

static bool
expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
{
  rtx rperm[8], vperm;
  unsigned i;

  if (!TARGET_AVX || d->vmode != V8SFmode || !d->one_operand_p)
    return false;

  /* We can only permute within the 128-bit lane.  */
  for (i = 0; i < 8; ++i)
    {
      unsigned e = d->perm[i];
      if (i < 4 ? e >= 4 : e < 4)
        return false;
    }

  if (d->testing_p)
    return true;

  for (i = 0; i < 8; ++i)
    {
      unsigned e = d->perm[i];

      /* Within each 128-bit lane, the elements of op0 are numbered
         from 0 and the elements of op1 are numbered from 4.  */
      if (e >= 8 + 4)
        e -= (8 + 4 - 4);
      else if (e >= 4)
        e -= 4;

      rperm[i] = GEN_INT (e);
    }

  vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
  vperm = force_reg (V8SImode, vperm);
  emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));

  return true;
}
/* Return true if permutation D can be performed as VMODE permutation
   instead.  */

static bool
valid_perm_using_mode_p (enum machine_mode vmode, struct expand_vec_perm_d *d)
{
  unsigned int i, j, chunk;

  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT
      || GET_MODE_CLASS (d->vmode) != MODE_VECTOR_INT
      || GET_MODE_SIZE (vmode) != GET_MODE_SIZE (d->vmode))
    return false;

  if (GET_MODE_NUNITS (vmode) >= d->nelt)
    return true;

  chunk = d->nelt / GET_MODE_NUNITS (vmode);
  for (i = 0; i < d->nelt; i += chunk)
    if (d->perm[i] & (chunk - 1))
      return false;
    else
      for (j = 1; j < chunk; ++j)
        if (d->perm[i] + j != d->perm[i + j])
          return false;

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of pshufb, vpperm, vpermq, vpermd, vpermps or vperm2i128.  */

static bool
expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
{
  unsigned i, nelt, eltsz, mask;
  unsigned char perm[32];
  enum machine_mode vmode = V16QImode;
  rtx rperm[32], vperm, target, op0, op1;

  nelt = d->nelt;

  if (!d->one_operand_p)
    {
      if (!TARGET_XOP || GET_MODE_SIZE (d->vmode) != 16)
        {
          if (TARGET_AVX2
              && valid_perm_using_mode_p (V2TImode, d))
            {
              if (d->testing_p)
                return true;

              /* Use vperm2i128 insn.  The pattern uses
                 V4DImode instead of V2TImode.  */
              target = gen_lowpart (V4DImode, d->target);
              op0 = gen_lowpart (V4DImode, d->op0);
              op1 = gen_lowpart (V4DImode, d->op1);
              rperm[0]
                = GEN_INT ((d->perm[0] / (nelt / 2))
                           | ((d->perm[nelt / 2] / (nelt / 2)) * 16));
              emit_insn (gen_avx2_permv2ti (target, op0, op1, rperm[0]));
              return true;
            }
          return false;
        }
    }
  else
    {
      if (GET_MODE_SIZE (d->vmode) == 16)
        {
          if (!TARGET_SSSE3)
            return false;
        }
      else if (GET_MODE_SIZE (d->vmode) == 32)
        {
          if (!TARGET_AVX2)
            return false;

          /* V4DImode should be already handled through
             expand_vselect by vpermq instruction.  */
          gcc_assert (d->vmode != V4DImode);

          vmode = V32QImode;
          if (d->vmode == V8SImode
              || d->vmode == V16HImode
              || d->vmode == V32QImode)
            {
              /* First see if vpermq can be used for
                 V8SImode/V16HImode/V32QImode.  */
              if (valid_perm_using_mode_p (V4DImode, d))
                {
                  for (i = 0; i < 4; i++)
                    perm[i] = (d->perm[i * nelt / 4] * 4 / nelt) & 3;
                  if (d->testing_p)
                    return true;
                  return expand_vselect (gen_lowpart (V4DImode, d->target),
                                         gen_lowpart (V4DImode, d->op0),
                                         perm, 4, false);
                }

              /* Next see if vpermd can be used.  */
              if (valid_perm_using_mode_p (V8SImode, d))
                vmode = V8SImode;
            }
          /* Or if vpermps can be used.  */
          else if (d->vmode == V8SFmode)
            vmode = V8SFmode;

          if (vmode == V32QImode)
            {
              /* vpshufb only works intra lanes, it is not
                 possible to shuffle bytes in between the lanes.  */
              for (i = 0; i < nelt; ++i)
                if ((d->perm[i] ^ i) & (nelt / 2))
                  return false;
            }
        }
      else
        return false;
    }

  if (d->testing_p)
    return true;

  if (vmode == V8SImode)
    for (i = 0; i < 8; ++i)
      rperm[i] = GEN_INT ((d->perm[i * nelt / 8] * 8 / nelt) & 7);
  else
    {
      eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
      if (!d->one_operand_p)
        mask = 2 * nelt - 1;
      else if (vmode == V16QImode)
        mask = nelt - 1;
      else
        mask = nelt / 2 - 1;

      for (i = 0; i < nelt; ++i)
        {
          unsigned j, e = d->perm[i] & mask;
          for (j = 0; j < eltsz; ++j)
            rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
        }
    }

  vperm = gen_rtx_CONST_VECTOR (vmode,
                                gen_rtvec_v (GET_MODE_NUNITS (vmode), rperm));
  vperm = force_reg (vmode, vperm);

  target = gen_lowpart (vmode, d->target);
  op0 = gen_lowpart (vmode, d->op0);
  if (d->one_operand_p)
    {
      if (vmode == V16QImode)
        emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
      else if (vmode == V32QImode)
        emit_insn (gen_avx2_pshufbv32qi3 (target, op0, vperm));
      else if (vmode == V8SFmode)
        emit_insn (gen_avx2_permvarv8sf (target, op0, vperm));
      else
        emit_insn (gen_avx2_permvarv8si (target, op0, vperm));
    }
  else
    {
      op1 = gen_lowpart (vmode, d->op1);
      emit_insn (gen_xop_pperm (target, op0, op1, vperm));
    }

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to instantiate D
   in a single instruction.  */

static bool
expand_vec_perm_1 (struct expand_vec_perm_d *d)
{
  unsigned i, nelt = d->nelt;
  unsigned char perm2[MAX_VECT_LEN];

  /* Check plain VEC_SELECT first, because AVX has instructions that could
     match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
     input where SEL+CONCAT may not.  */
  if (d->one_operand_p)
    {
      int mask = nelt - 1;
      bool identity_perm = true;
      bool broadcast_perm = true;

      for (i = 0; i < nelt; i++)
        {
          perm2[i] = d->perm[i] & mask;
          if (perm2[i] != i)
            identity_perm = false;
          if (perm2[i])
            broadcast_perm = false;
        }

      if (identity_perm)
        {
          if (!d->testing_p)
            emit_move_insn (d->target, d->op0);
          return true;
        }
      else if (broadcast_perm && TARGET_AVX2)
        {
          /* Use vpbroadcast{b,w,d}.  */
          rtx (*gen) (rtx, rtx) = NULL;
          switch (d->vmode)
            {
            case V32QImode:
              gen = gen_avx2_pbroadcastv32qi_1;
              break;
            case V16HImode:
              gen = gen_avx2_pbroadcastv16hi_1;
              break;
            case V8SImode:
              gen = gen_avx2_pbroadcastv8si_1;
              break;
            case V16QImode:
              gen = gen_avx2_pbroadcastv16qi;
              break;
            case V8HImode:
              gen = gen_avx2_pbroadcastv8hi;
              break;
            case V8SFmode:
              gen = gen_avx2_vec_dupv8sf_1;
              break;
            /* For other modes prefer other shuffles this function creates.  */
            default:
              break;
            }
          if (gen != NULL)
            {
              if (!d->testing_p)
                emit_insn (gen (d->target, d->op0));
              return true;
            }
        }

      if (expand_vselect (d->target, d->op0, perm2, nelt, d->testing_p))
        return true;

      /* There are plenty of patterns in sse.md that are written for
         SEL+CONCAT and are not replicated for a single op.  Perhaps
         that should be changed, to avoid the nastiness here.  */

      /* Recognize interleave style patterns, which means incrementing
         every other permutation operand.  */
      for (i = 0; i < nelt; i += 2)
        {
          perm2[i] = d->perm[i] & mask;
          perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
        }
      if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt,
                                  d->testing_p))
        return true;

      /* Recognize shufps, which means adding {0, 0, nelt, nelt}.  */
      if (nelt >= 4)
        {
          for (i = 0; i < nelt; i += 4)
            {
              perm2[i + 0] = d->perm[i + 0] & mask;
              perm2[i + 1] = d->perm[i + 1] & mask;
              perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
              perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
            }

          if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt,
                                      d->testing_p))
            return true;
        }
    }

  /* Finally, try the fully general two operand permute.  */
  if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt,
                              d->testing_p))
    return true;

  /* Recognize interleave style patterns with reversed operands.  */
  if (!d->one_operand_p)
    {
      for (i = 0; i < nelt; ++i)
        {
          unsigned e = d->perm[i];
          if (e >= nelt)
            e -= nelt;
          else
            e += nelt;
          perm2[i] = e;
        }

      if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt,
                                  d->testing_p))
        return true;
    }

  /* Try the SSE4.1 blend variable merge instructions.  */
  if (expand_vec_perm_blend (d))
    return true;

  /* Try one of the AVX vpermil variable permutations.  */
  if (expand_vec_perm_vpermil (d))
    return true;

  /* Try the SSSE3 pshufb or XOP vpperm or AVX2 vperm2i128,
     vpshufb, vpermd, vpermps or vpermq variable permutation.  */
  if (expand_vec_perm_pshufb (d))
    return true;

  return false;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of a pair of pshuflw + pshufhw instructions.  */

static bool
expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
{
  unsigned char perm2[MAX_VECT_LEN];
  unsigned i;
  bool ok;

  if (d->vmode != V8HImode || !d->one_operand_p)
    return false;

  /* The two permutations only operate in 64-bit lanes.  */
  for (i = 0; i < 4; ++i)
    if (d->perm[i] >= 4)
      return false;
  for (i = 4; i < 8; ++i)
    if (d->perm[i] < 4)
      return false;

  if (d->testing_p)
    return true;

  /* Emit the pshuflw.  */
  memcpy (perm2, d->perm, 4);
  for (i = 4; i < 8; ++i)
    perm2[i] = i;
  ok = expand_vselect (d->target, d->op0, perm2, 8, d->testing_p);
  gcc_assert (ok);

  /* Emit the pshufhw.  */
  memcpy (perm2 + 4, d->perm + 4, 4);
  for (i = 0; i < 4; ++i)
    perm2[i] = i;
  ok = expand_vselect (d->target, d->target, perm2, 8, d->testing_p);
  gcc_assert (ok);

  return true;
}
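
/* E.g. for d->perm = { 2, 0, 3, 1, 7, 5, 6, 4 } the first vselect
   above is the pshuflw { 2, 0, 3, 1, 4, 5, 6, 7 }, which leaves the
   high quadword untouched, and the second is the pshufhw
   { 0, 1, 2, 3, 7, 5, 6, 4 }, which only rearranges the high quadword
   of the intermediate result.  */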
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   the permutation using the SSSE3 palignr instruction.  This succeeds
   when all of the elements in PERM fit within one vector and we merely
   need to shift them down so that a single vector permutation has a
   chance to succeed.  */

static bool
expand_vec_perm_palignr (struct expand_vec_perm_d *d)
{
  unsigned i, nelt = d->nelt;
  unsigned min, max;
  bool in_order, ok;
  rtx shift;

  /* Even with AVX, palignr only operates on 128-bit vectors.  */
  if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
    return false;

  min = nelt, max = 0;
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = d->perm[i];
      if (e < min)
        min = e;
      if (e > max)
        max = e;
    }
  if (min == 0 || max - min >= nelt)
    return false;

  /* Given that we have SSSE3, we know we'll be able to implement the
     single operand permutation after the palignr with pshufb.  */
  if (d->testing_p)
    return true;

  shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
  emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
                                  gen_lowpart (TImode, d->op1),
                                  gen_lowpart (TImode, d->op0), shift));

  d->op0 = d->op1 = d->target;
  d->one_operand_p = true;

  in_order = true;
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = d->perm[i] - min;
      if (e != i)
        in_order = false;
      d->perm[i] = e;
    }

  /* Test for the degenerate case where the alignment by itself
     produces the desired permutation.  */
  if (in_order)
    return true;

  ok = expand_vec_perm_1 (d);
  gcc_assert (ok);

  return ok;
}

static bool expand_vec_perm_interleave3 (struct expand_vec_perm_d *d);
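
/* E.g. for V16QImode and d->perm = { 3, 4, ..., 18 } we have min == 3
   and max == 18, so max - min < nelt and min != 0; the palignr shifts
   the op1:op0 pair down by 3 * 8 == 24 bits, after which the adjusted
   permutation { 0, 1, ..., 15 } is already in order and the
   degenerate-case test above succeeds with no further shuffle.  */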
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   a two vector permutation into a single vector permutation by using
   an interleave operation to merge the vectors.  */

static bool
expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dremap, dfinal;
  unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
  unsigned HOST_WIDE_INT contents;
  unsigned char remap[2 * MAX_VECT_LEN];
  rtx seq;
  bool ok, same_halves = false;

  if (GET_MODE_SIZE (d->vmode) == 16)
    {
      if (d->one_operand_p)
        return false;
    }
  else if (GET_MODE_SIZE (d->vmode) == 32)
    {
      if (!TARGET_AVX)
        return false;
      /* For 32-byte modes allow even d->one_operand_p.
         The lack of cross-lane shuffling in some instructions
         might prevent a single insn shuffle.  */
      dfinal = *d;
      dfinal.testing_p = true;
      /* If expand_vec_perm_interleave3 can expand this into
         a 3 insn sequence, give up and let it be expanded as
         3 insn sequence.  While that is one insn longer,
         it doesn't need a memory operand and in the common
         case that both interleave low and high permutations
         with the same operands are adjacent needs 4 insns
         for both after CSE.  */
      if (expand_vec_perm_interleave3 (&dfinal))
        return false;
    }
  else
    return false;

  /* Examine from whence the elements come.  */
  contents = 0;
  for (i = 0; i < nelt; ++i)
    contents |= ((unsigned HOST_WIDE_INT) 1) << d->perm[i];

  memset (remap, 0xff, sizeof (remap));
  dremap = *d;

  if (GET_MODE_SIZE (d->vmode) == 16)
    {
      unsigned HOST_WIDE_INT h1, h2, h3, h4;

      /* Split the two input vectors into 4 halves.  */
      h1 = (((unsigned HOST_WIDE_INT) 1) << nelt2) - 1;
      h2 = h1 << nelt2;
      h3 = h2 << nelt2;
      h4 = h3 << nelt2;

      /* If the elements from the low halves use interleave low, and similarly
         for interleave high.  If the elements are from mis-matched halves, we
         can use shufps for V4SF/V4SI or do a DImode shuffle.  */
      if ((contents & (h1 | h3)) == contents)
        {
          /* punpckl* */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i] = i * 2;
              remap[i + nelt] = i * 2 + 1;
              dremap.perm[i * 2] = i;
              dremap.perm[i * 2 + 1] = i + nelt;
            }
          if (!TARGET_SSE2 && d->vmode == V4SImode)
            dremap.vmode = V4SFmode;
        }
      else if ((contents & (h2 | h4)) == contents)
        {
          /* punpckh* */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i + nelt2] = i * 2;
              remap[i + nelt + nelt2] = i * 2 + 1;
              dremap.perm[i * 2] = i + nelt2;
              dremap.perm[i * 2 + 1] = i + nelt + nelt2;
            }
          if (!TARGET_SSE2 && d->vmode == V4SImode)
            dremap.vmode = V4SFmode;
        }
      else if ((contents & (h1 | h4)) == contents)
        {
          /* shufps */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i] = i;
              remap[i + nelt + nelt2] = i + nelt2;
              dremap.perm[i] = i;
              dremap.perm[i + nelt2] = i + nelt + nelt2;
            }
          if (nelt != 4)
            {
              /* shufpd */
              dremap.vmode = V2DImode;
              dremap.nelt = 2;
              dremap.perm[0] = 0;
              dremap.perm[1] = 3;
            }
        }
      else if ((contents & (h2 | h3)) == contents)
        {
          /* shufps */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i + nelt2] = i;
              remap[i + nelt] = i + nelt2;
              dremap.perm[i] = i + nelt2;
              dremap.perm[i + nelt2] = i + nelt;
            }
          if (nelt != 4)
            {
              /* shufpd */
              dremap.vmode = V2DImode;
              dremap.nelt = 2;
              dremap.perm[0] = 1;
              dremap.perm[1] = 2;
            }
        }
      else
        return false;
    }
  else
    {
      unsigned int nelt4 = nelt / 4, nzcnt = 0;
      unsigned HOST_WIDE_INT q[8];
      unsigned int nonzero_halves[4];

      /* Split the two input vectors into 8 quarters.  */
      q[0] = (((unsigned HOST_WIDE_INT) 1) << nelt4) - 1;
      for (i = 1; i < 8; ++i)
        q[i] = q[0] << (nelt4 * i);
      for (i = 0; i < 4; ++i)
        if (((q[2 * i] | q[2 * i + 1]) & contents) != 0)
          {
            nonzero_halves[nzcnt] = i;
            ++nzcnt;
          }

      if (nzcnt == 1)
        {
          gcc_assert (d->one_operand_p);
          nonzero_halves[1] = nonzero_halves[0];
          same_halves = true;
        }
      else if (d->one_operand_p)
        {
          gcc_assert (nonzero_halves[0] == 0);
          gcc_assert (nonzero_halves[1] == 1);
        }

      if (nzcnt <= 2)
        {
          if (d->perm[0] / nelt2 == nonzero_halves[1])
            {
              /* Attempt to increase the likelihood that dfinal
                 shuffle will be intra-lane.  */
              char tmph = nonzero_halves[0];
              nonzero_halves[0] = nonzero_halves[1];
              nonzero_halves[1] = tmph;
            }

          /* vperm2f128 or vperm2i128.  */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i + nonzero_halves[1] * nelt2] = i + nelt2;
              remap[i + nonzero_halves[0] * nelt2] = i;
              dremap.perm[i + nelt2] = i + nonzero_halves[1] * nelt2;
              dremap.perm[i] = i + nonzero_halves[0] * nelt2;
            }

          if (d->vmode != V8SFmode
              && d->vmode != V4DFmode
              && d->vmode != V8SImode)
            {
              dremap.vmode = V8SImode;
              dremap.nelt = 8;
              for (i = 0; i < 4; ++i)
                {
                  dremap.perm[i] = i + nonzero_halves[0] * 4;
                  dremap.perm[i + 4] = i + nonzero_halves[1] * 4;
                }
            }
        }
      else if (d->one_operand_p)
        return false;
      else if (TARGET_AVX2
               && (contents & (q[0] | q[2] | q[4] | q[6])) == contents)
        {
          /* vpunpckl* */
          for (i = 0; i < nelt4; ++i)
            {
              remap[i] = i * 2;
              remap[i + nelt] = i * 2 + 1;
              remap[i + nelt2] = i * 2 + nelt2;
              remap[i + nelt + nelt2] = i * 2 + nelt2 + 1;
              dremap.perm[i * 2] = i;
              dremap.perm[i * 2 + 1] = i + nelt;
              dremap.perm[i * 2 + nelt2] = i + nelt2;
              dremap.perm[i * 2 + nelt2 + 1] = i + nelt + nelt2;
            }
        }
      else if (TARGET_AVX2
               && (contents & (q[1] | q[3] | q[5] | q[7])) == contents)
        {
          /* vpunpckh* */
          for (i = 0; i < nelt4; ++i)
            {
              remap[i + nelt4] = i * 2;
              remap[i + nelt + nelt4] = i * 2 + 1;
              remap[i + nelt2 + nelt4] = i * 2 + nelt2;
              remap[i + nelt + nelt2 + nelt4] = i * 2 + nelt2 + 1;
              dremap.perm[i * 2] = i + nelt4;
              dremap.perm[i * 2 + 1] = i + nelt + nelt4;
              dremap.perm[i * 2 + nelt2] = i + nelt2 + nelt4;
              dremap.perm[i * 2 + nelt2 + 1] = i + nelt + nelt2 + nelt4;
            }
        }
      else
        return false;
    }

  /* Use the remapping array set up above to move the elements from their
     swizzled locations into their final destinations.  */
  dfinal = *d;
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = remap[d->perm[i]];
      gcc_assert (e < nelt);
      /* If same_halves is true, both halves of the remapped vector are the
         same.  Avoid cross-lane accesses if possible.  */
      if (same_halves && i >= nelt2)
        {
          gcc_assert (e < nelt2);
          dfinal.perm[i] = e + nelt2;
        }
      else
        dfinal.perm[i] = e;
    }
  dfinal.op0 = gen_reg_rtx (dfinal.vmode);
  dfinal.op1 = dfinal.op0;
  dfinal.one_operand_p = true;
  dremap.target = dfinal.op0;

  /* Test if the final remap can be done with a single insn.  For V4SFmode or
     V4SImode this *will* succeed.  For V8HImode or V16QImode it may not.  */
  start_sequence ();
  ok = expand_vec_perm_1 (&dfinal);
  seq = get_insns ();
  end_sequence ();

  if (!ok)
    return false;

  if (d->testing_p)
    return true;

  if (dremap.vmode != dfinal.vmode)
    {
      dremap.target = gen_lowpart (dremap.vmode, dremap.target);
      dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
      dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
    }

  ok = expand_vec_perm_1 (&dremap);
  gcc_assert (ok);

  emit_insn (seq);
  return true;
}
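
/* As an illustration of the CONTENTS test above: a two-operand
   V8HImode permutation { 0, 1, 8, 9, 2, 3, 10, 11 } sets contents
   bits { 0, 1, 2, 3, 8, 9, 10, 11 }, so (contents & (h1 | h3)) ==
   contents holds; the elements come only from the low halves of the
   two inputs and the interleave-low (punpcklwd-style) dremap is
   chosen before the final single-vector shuffle is attempted.  */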
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   a single vector cross-lane permutation into vpermq followed
   by any of the single insn permutations.  */

static bool
expand_vec_perm_vpermq_perm_1 (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dremap, dfinal;
  unsigned i, j, nelt = d->nelt, nelt2 = nelt / 2, nelt4 = nelt / 4;
  unsigned contents[2];
  bool ok;

  if (!(TARGET_AVX2
        && (d->vmode == V32QImode || d->vmode == V16HImode)
        && d->one_operand_p))
    return false;

  contents[0] = 0;
  contents[1] = 0;
  for (i = 0; i < nelt2; ++i)
    {
      contents[0] |= 1u << (d->perm[i] / nelt4);
      contents[1] |= 1u << (d->perm[i + nelt2] / nelt4);
    }

  for (i = 0; i < 2; ++i)
    {
      unsigned int cnt = 0;
      for (j = 0; j < 4; ++j)
        if ((contents[i] & (1u << j)) != 0 && ++cnt > 2)
          return false;
    }

  if (d->testing_p)
    return true;

  dremap = *d;
  dremap.vmode = V4DImode;
  dremap.nelt = 4;
  dremap.target = gen_reg_rtx (V4DImode);
  dremap.op0 = gen_lowpart (V4DImode, d->op0);
  dremap.op1 = dremap.op0;
  dremap.one_operand_p = true;
  for (i = 0; i < 2; ++i)
    {
      unsigned int cnt = 0;
      for (j = 0; j < 4; ++j)
        if ((contents[i] & (1u << j)) != 0)
          dremap.perm[2 * i + cnt++] = j;
      for (; cnt < 2; ++cnt)
        dremap.perm[2 * i + cnt] = 0;
    }

  dfinal = *d;
  dfinal.op0 = gen_lowpart (dfinal.vmode, dremap.target);
  dfinal.op1 = dfinal.op0;
  dfinal.one_operand_p = true;
  for (i = 0, j = 0; i < nelt; ++i)
    {
      if (i == nelt2)
        j = 2;
      dfinal.perm[i] = (d->perm[i] & (nelt4 - 1)) | (j ? nelt2 : 0);
      if ((d->perm[i] / nelt4) == dremap.perm[j])
        ;
      else if ((d->perm[i] / nelt4) == dremap.perm[j + 1])
        dfinal.perm[i] |= nelt4;
      else
        gcc_unreachable ();
    }

  ok = expand_vec_perm_1 (&dremap);
  gcc_assert (ok);

  ok = expand_vec_perm_1 (&dfinal);
  gcc_assert (ok);

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to expand
   a vector permutation using two instructions, vperm2f128 resp.
   vperm2i128 followed by any single in-lane permutation.  */

static bool
expand_vec_perm_vperm2f128 (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dfirst, dsecond;
  unsigned i, j, nelt = d->nelt, nelt2 = nelt / 2, perm;
  bool ok;

  if (!TARGET_AVX
      || GET_MODE_SIZE (d->vmode) != 32
      || (d->vmode != V8SFmode && d->vmode != V4DFmode && !TARGET_AVX2))
    return false;

  dsecond = *d;
  dsecond.one_operand_p = false;
  dsecond.testing_p = true;

  /* ((perm << 2)|perm) & 0x33 is the vperm2[fi]128
     immediate.  For perm < 16 the second permutation uses
     d->op0 as first operand, for perm >= 16 it uses d->op1
     as first operand.  The second operand is the result of
     vperm2[fi]128.  */
  for (perm = 0; perm < 32; perm++)
    {
      /* Ignore permutations which do not move anything cross-lane.  */
      if (perm < 16)
        {
          /* The second shuffle for e.g. V4DFmode has
             0123 and ABCD operands.
             Ignore AB23, as 23 is already in the second lane
             of the first operand.  */
          if ((perm & 0xc) == (1 << 2)) continue;
          /* And 01CD, as 01 is in the first lane of the first
             operand.  */
          if ((perm & 3) == 0) continue;
          /* And 4567, as then the vperm2[fi]128 doesn't change
             anything on the original 4567 second operand.  */
          if ((perm & 0xf) == ((3 << 2) | 2)) continue;
        }
      else
        {
          /* The second shuffle for e.g. V4DFmode has
             4567 and ABCD operands.
             Ignore AB67, as 67 is already in the second lane
             of the first operand.  */
          if ((perm & 0xc) == (3 << 2)) continue;
          /* And 45CD, as 45 is in the first lane of the first
             operand.  */
          if ((perm & 3) == 2) continue;
          /* And 0123, as then the vperm2[fi]128 doesn't change
             anything on the original 0123 first operand.  */
          if ((perm & 0xf) == (1 << 2)) continue;
        }

      for (i = 0; i < nelt; i++)
        {
          j = d->perm[i] / nelt2;
          if (j == ((perm >> (2 * (i >= nelt2))) & 3))
            dsecond.perm[i] = nelt + (i & nelt2) + (d->perm[i] & (nelt2 - 1));
          else if (j == (unsigned) (i >= nelt2) + 2 * (perm >= 16))
            dsecond.perm[i] = d->perm[i] & (nelt - 1);
          else
            break;
        }

      if (i == nelt)
        {
          start_sequence ();
          ok = expand_vec_perm_1 (&dsecond);
          end_sequence ();
        }
      else
        ok = false;

      if (ok)
        {
          if (d->testing_p)
            return true;

          /* Found a usable second shuffle.  dfirst will be
             vperm2f128 on d->op0 and d->op1.  */
          dsecond.testing_p = false;
          dfirst = *d;
          dfirst.target = gen_reg_rtx (d->vmode);
          for (i = 0; i < nelt; i++)
            dfirst.perm[i] = (i & (nelt2 - 1))
                             + ((perm >> (2 * (i >= nelt2))) & 3) * nelt2;

          ok = expand_vec_perm_1 (&dfirst);
          gcc_assert (ok);

          /* And dsecond is some single insn shuffle, taking
             d->op0 and result of vperm2f128 (if perm < 16) or
             d->op1 and result of vperm2f128 (otherwise).  */
          dsecond.op1 = dfirst.target;
          if (perm >= 16)
            dsecond.op0 = dfirst.op1;

          ok = expand_vec_perm_1 (&dsecond);
          gcc_assert (ok);

          return true;
        }

      /* For one operand, the only useful vperm2f128 permutation is 0x10.  */
      if (d->one_operand_p)
        return false;
    }

  return false;
}
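
/* To make the immediate encoding concrete: for the loop counter
   perm == 6, ((perm << 2) | perm) & 0x33 == 0x12, i.e. a vperm2f128
   that puts the low lane of d->op1 into the low lane of the result
   and the high lane of d->op0 into its high lane; for V4DFmode the
   corresponding dfirst.perm works out to { 4, 5, 2, 3 }.  */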
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   a two vector permutation using 2 intra-lane interleave insns
   and cross-lane shuffle for 32-byte vectors.  */

static bool
expand_vec_perm_interleave3 (struct expand_vec_perm_d *d)
{
  unsigned i, nelt;
  rtx (*gen) (rtx, rtx, rtx);

  if (d->one_operand_p)
    return false;
  if (TARGET_AVX2 && GET_MODE_SIZE (d->vmode) == 32)
    ;
  else if (TARGET_AVX && (d->vmode == V8SFmode || d->vmode == V4DFmode))
    ;
  else
    return false;

  nelt = d->nelt;
  if (d->perm[0] != 0 && d->perm[0] != nelt / 2)
    return false;
  for (i = 0; i < nelt; i += 2)
    if (d->perm[i] != d->perm[0] + i / 2
        || d->perm[i + 1] != d->perm[0] + i / 2 + nelt)
      return false;

  if (d->testing_p)
    return true;

  switch (d->vmode)
    {
    case V32QImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv32qi;
      else
        gen = gen_vec_interleave_lowv32qi;
      break;
    case V16HImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv16hi;
      else
        gen = gen_vec_interleave_lowv16hi;
      break;
    case V8SImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv8si;
      else
        gen = gen_vec_interleave_lowv8si;
      break;
    case V4DImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv4di;
      else
        gen = gen_vec_interleave_lowv4di;
      break;
    case V8SFmode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv8sf;
      else
        gen = gen_vec_interleave_lowv8sf;
      break;
    case V4DFmode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv4df;
      else
        gen = gen_vec_interleave_lowv4df;
      break;
    default:
      gcc_unreachable ();
    }

  emit_insn (gen (d->target, d->op0, d->op1));
  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement
   a single vector permutation using a single intra-lane vector
   permutation, vperm2f128 swapping the lanes and vblend* insn blending
   the non-swapped and swapped vectors together.  */

static bool
expand_vec_perm_vperm2f128_vblend (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dfirst, dsecond;
  unsigned i, j, msk, nelt = d->nelt, nelt2 = nelt / 2;
  rtx seq;
  bool ok;
  rtx (*blend) (rtx, rtx, rtx, rtx) = NULL;

  if (!TARGET_AVX
      || (d->vmode != V8SFmode && d->vmode != V4DFmode)
      || !d->one_operand_p)
    return false;

  dfirst = *d;
  for (i = 0; i < nelt; i++)
    dfirst.perm[i] = 0xff;
  for (i = 0, msk = 0; i < nelt; i++)
    {
      j = (d->perm[i] & nelt2) ? i | nelt2 : i & ~nelt2;
      if (dfirst.perm[j] != 0xff && dfirst.perm[j] != d->perm[i])
        return false;
      dfirst.perm[j] = d->perm[i];
      if (j != i)
        msk |= (1 << i);
    }
  for (i = 0; i < nelt; i++)
    if (dfirst.perm[i] == 0xff)
      dfirst.perm[i] = i;

  dfirst.target = gen_reg_rtx (dfirst.vmode);

  start_sequence ();
  ok = expand_vec_perm_1 (&dfirst);
  seq = get_insns ();
  end_sequence ();

  if (!ok)
    return false;

  if (d->testing_p)
    return true;

  emit_insn (seq);

  dsecond = *d;
  dsecond.op0 = dfirst.target;
  dsecond.op1 = dfirst.target;
  dsecond.one_operand_p = true;
  dsecond.target = gen_reg_rtx (dsecond.vmode);
  for (i = 0; i < nelt; i++)
    dsecond.perm[i] = i ^ nelt2;

  ok = expand_vec_perm_1 (&dsecond);
  gcc_assert (ok);

  blend = d->vmode == V8SFmode ? gen_avx_blendps256 : gen_avx_blendpd256;
  emit_insn (blend (d->target, dfirst.target, dsecond.target, GEN_INT (msk)));
  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Implement a V4DF
   permutation using two vperm2f128, followed by a vshufpd insn blending
   the two vectors together.  */

static bool
expand_vec_perm_2vperm2f128_vshuf (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dfirst, dsecond, dthird;
  bool ok;

  if (!TARGET_AVX || (d->vmode != V4DFmode))
    return false;

  if (d->testing_p)
    return true;

  dfirst = *d;
  dsecond = *d;
  dthird = *d;

  dfirst.perm[0] = (d->perm[0] & ~1);
  dfirst.perm[1] = (d->perm[0] & ~1) + 1;
  dfirst.perm[2] = (d->perm[2] & ~1);
  dfirst.perm[3] = (d->perm[2] & ~1) + 1;
  dsecond.perm[0] = (d->perm[1] & ~1);
  dsecond.perm[1] = (d->perm[1] & ~1) + 1;
  dsecond.perm[2] = (d->perm[3] & ~1);
  dsecond.perm[3] = (d->perm[3] & ~1) + 1;
  dthird.perm[0] = (d->perm[0] % 2);
  dthird.perm[1] = (d->perm[1] % 2) + 4;
  dthird.perm[2] = (d->perm[2] % 2) + 2;
  dthird.perm[3] = (d->perm[3] % 2) + 6;

  dfirst.target = gen_reg_rtx (dfirst.vmode);
  dsecond.target = gen_reg_rtx (dsecond.vmode);
  dthird.op0 = dfirst.target;
  dthird.op1 = dsecond.target;
  dthird.one_operand_p = false;

  canonicalize_perm (&dfirst);
  canonicalize_perm (&dsecond);

  ok = expand_vec_perm_1 (&dfirst)
       && expand_vec_perm_1 (&dsecond)
       && expand_vec_perm_1 (&dthird);

  gcc_assert (ok);

  return true;
}
/* A subroutine of expand_vec_perm_even_odd_1.  Implement the double-word
   permutation with two pshufb insns and an ior.  We should have already
   failed all two instruction sequences.  */

static bool
expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
{
  rtx rperm[2][16], vperm, l, h, op, m128;
  unsigned int i, nelt, eltsz;

  if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
    return false;
  gcc_assert (!d->one_operand_p);

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate two permutation masks.  If the required element is within
     the given vector it is shuffled into the proper lane.  If the required
     element is in the other vector, force a zero into the lane by setting
     bit 7 in the permutation mask.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i];
      unsigned which = (e >= nelt);
      if (e >= nelt)
        e -= nelt;

      for (j = 0; j < eltsz; ++j)
        {
          rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
          rperm[1-which][i*eltsz + j] = m128;
        }
    }

  vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
  vperm = force_reg (V16QImode, vperm);

  l = gen_reg_rtx (V16QImode);
  op = gen_lowpart (V16QImode, d->op0);
  emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));

  vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
  vperm = force_reg (V16QImode, vperm);

  h = gen_reg_rtx (V16QImode);
  op = gen_lowpart (V16QImode, d->op1);
  emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));

  op = gen_lowpart (V16QImode, d->target);
  emit_insn (gen_iorv16qi3 (op, l, h));

  return true;
}
/* Implement arbitrary permutation of one V32QImode and V16QImode operand
   with two vpshufb insns, vpermq and vpor.  We should have already failed
   all two or three instruction sequences.  */

static bool
expand_vec_perm_vpshufb2_vpermq (struct expand_vec_perm_d *d)
{
  rtx rperm[2][32], vperm, l, h, hp, op, m128;
  unsigned int i, nelt, eltsz;

  if (!TARGET_AVX2
      || !d->one_operand_p
      || (d->vmode != V32QImode && d->vmode != V16HImode))
    return false;

  if (d->testing_p)
    return true;

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate two permutation masks.  If the required element is within
     the same lane, it is shuffled in.  If the required element from the
     other lane, force a zero by setting bit 7 in the permutation mask.
     In the other mask the mask has non-negative elements if element
     is requested from the other lane, but also moved to the other lane,
     so that the result of vpshufb can have the two V2TImode halves
     swapped.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i] & (nelt / 2 - 1);
      unsigned which = ((d->perm[i] ^ i) & (nelt / 2)) * eltsz;

      for (j = 0; j < eltsz; ++j)
        {
          rperm[!!which][(i * eltsz + j) ^ which] = GEN_INT (e * eltsz + j);
          rperm[!which][(i * eltsz + j) ^ (which ^ 16)] = m128;
        }
    }

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[1]));
  vperm = force_reg (V32QImode, vperm);

  h = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op0);
  emit_insn (gen_avx2_pshufbv32qi3 (h, op, vperm));

  /* Swap the 128-bit lanes of h into hp.  */
  hp = gen_reg_rtx (V4DImode);
  op = gen_lowpart (V4DImode, h);
  emit_insn (gen_avx2_permv4di_1 (hp, op, const2_rtx, GEN_INT (3), const0_rtx,
                                  const1_rtx));

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[0]));
  vperm = force_reg (V32QImode, vperm);

  l = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op0);
  emit_insn (gen_avx2_pshufbv32qi3 (l, op, vperm));

  op = gen_lowpart (V32QImode, d->target);
  emit_insn (gen_iorv32qi3 (op, l, gen_lowpart (V32QImode, hp)));

  return true;
}
/* A subroutine of expand_vec_perm_even_odd_1.  Implement extract-even
   and extract-odd permutations of two V32QImode and V16QImode operand
   with two vpshufb insns, vpor and vpermq.  We should have already
   failed all two or three instruction sequences.  */

static bool
expand_vec_perm_vpshufb2_vpermq_even_odd (struct expand_vec_perm_d *d)
{
  rtx rperm[2][32], vperm, l, h, ior, op, m128;
  unsigned int i, nelt, eltsz;

  if (!TARGET_AVX2
      || d->one_operand_p
      || (d->vmode != V32QImode && d->vmode != V16HImode))
    return false;

  for (i = 0; i < d->nelt; ++i)
    if ((d->perm[i] ^ (i * 2)) & (3 * d->nelt / 2))
      return false;

  if (d->testing_p)
    return true;

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate two permutation masks.  In the first permutation mask
     the first quarter will contain indexes for the first half
     of the op0, the second quarter will contain bit 7 set, third quarter
     will contain indexes for the second half of the op0 and the
     last quarter bit 7 set.  In the second permutation mask
     the first quarter will contain bit 7 set, the second quarter
     indexes for the first half of the op1, the third quarter bit 7 set
     and last quarter indexes for the second half of the op1.
     I.e. the first mask e.g. for V32QImode extract even will be:
     0, 2, ..., 0xe, -128, ..., -128, 0, 2, ..., 0xe, -128, ..., -128
     (all values masked with 0xf except for -128) and second mask
     for extract even will be
     -128, ..., -128, 0, 2, ..., 0xe, -128, ..., -128, 0, 2, ..., 0xe.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i] & (nelt / 2 - 1);
      unsigned which = d->perm[i] >= nelt;
      unsigned xorv = (i >= nelt / 4 && i < 3 * nelt / 4) ? 24 : 0;

      for (j = 0; j < eltsz; ++j)
        {
          rperm[which][(i * eltsz + j) ^ xorv] = GEN_INT (e * eltsz + j);
          rperm[1 - which][(i * eltsz + j) ^ xorv] = m128;
        }
    }

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[0]));
  vperm = force_reg (V32QImode, vperm);

  l = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op0);
  emit_insn (gen_avx2_pshufbv32qi3 (l, op, vperm));

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[1]));
  vperm = force_reg (V32QImode, vperm);

  h = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op1);
  emit_insn (gen_avx2_pshufbv32qi3 (h, op, vperm));

  ior = gen_reg_rtx (V32QImode);
  emit_insn (gen_iorv32qi3 (ior, l, h));

  /* Permute the V4DImode quarters using { 0, 2, 1, 3 } permutation.  */
  op = gen_lowpart (V4DImode, d->target);
  ior = gen_lowpart (V4DImode, ior);
  emit_insn (gen_avx2_permv4di_1 (op, ior, const0_rtx, const2_rtx,
                                  const1_rtx, GEN_INT (3)));

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Implement extract-even
   and extract-odd permutations.  */

static bool
expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
{
  rtx t1, t2, t3;

  switch (d->vmode)
    {
    case V4DFmode:
      t1 = gen_reg_rtx (V4DFmode);
      t2 = gen_reg_rtx (V4DFmode);

      /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }.  */
      emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
      emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));

      /* Now an unpck[lh]pd will produce the result required.  */
      if (odd)
        t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
      else
        t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
      emit_insn (t3);
      break;

    case V8SFmode:
      {
        int mask = odd ? 0xdd : 0x88;

        t1 = gen_reg_rtx (V8SFmode);
        t2 = gen_reg_rtx (V8SFmode);
        t3 = gen_reg_rtx (V8SFmode);

        /* Shuffle within the 128-bit lanes to produce:
           { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }.  */
        emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
                                      GEN_INT (mask)));

        /* Shuffle the lanes around to produce:
           { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }.  */
        emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
                                            GEN_INT (0x3)));

        /* Shuffle within the 128-bit lanes to produce:
           { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }.  */
        emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));

        /* Shuffle within the 128-bit lanes to produce:
           { 8 a c e c e 8 a } | { 9 b d f d f 9 b }.  */
        emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));

        /* Shuffle the lanes around to produce:
           { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }.  */
        emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
                                            GEN_INT (0x20)));
      }
      break;

    case V2DFmode:
    case V4SFmode:
    case V2DImode:
    case V4SImode:
      /* These are always directly implementable by expand_vec_perm_1.  */
      gcc_unreachable ();

    case V8HImode:
      if (TARGET_SSSE3)
        return expand_vec_perm_pshufb2 (d);
      else
        {
          /* We need 2*log2(N)-1 operations to achieve odd/even
             with interleave.  */
          t1 = gen_reg_rtx (V8HImode);
          t2 = gen_reg_rtx (V8HImode);
          emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
          emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
          emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
          emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
          if (odd)
            t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
          else
            t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
          emit_insn (t3);
        }
      break;

    case V16QImode:
      if (TARGET_SSSE3)
        return expand_vec_perm_pshufb2 (d);
      else
        {
          t1 = gen_reg_rtx (V16QImode);
          t2 = gen_reg_rtx (V16QImode);
          t3 = gen_reg_rtx (V16QImode);
          emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
          emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
          emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
          emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
          emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
          emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
          if (odd)
            t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
          else
            t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
          emit_insn (t3);
        }
      break;

    case V16HImode:
    case V32QImode:
      return expand_vec_perm_vpshufb2_vpermq_even_odd (d);

    case V4DImode:
      if (!TARGET_AVX2)
        {
          struct expand_vec_perm_d d_copy = *d;
          d_copy.vmode = V4DFmode;
          d_copy.target = gen_lowpart (V4DFmode, d->target);
          d_copy.op0 = gen_lowpart (V4DFmode, d->op0);
          d_copy.op1 = gen_lowpart (V4DFmode, d->op1);
          return expand_vec_perm_even_odd_1 (&d_copy, odd);
        }

      t1 = gen_reg_rtx (V4DImode);
      t2 = gen_reg_rtx (V4DImode);

      /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }.  */
      emit_insn (gen_avx2_permv2ti (t1, d->op0, d->op1, GEN_INT (0x20)));
      emit_insn (gen_avx2_permv2ti (t2, d->op0, d->op1, GEN_INT (0x31)));

      /* Now a vpunpck[lh]qdq will produce the result required.  */
      if (odd)
        t3 = gen_avx2_interleave_highv4di (d->target, t1, t2);
      else
        t3 = gen_avx2_interleave_lowv4di (d->target, t1, t2);
      emit_insn (t3);
      break;

    case V8SImode:
      if (!TARGET_AVX2)
        {
          struct expand_vec_perm_d d_copy = *d;
          d_copy.vmode = V8SFmode;
          d_copy.target = gen_lowpart (V8SFmode, d->target);
          d_copy.op0 = gen_lowpart (V8SFmode, d->op0);
          d_copy.op1 = gen_lowpart (V8SFmode, d->op1);
          return expand_vec_perm_even_odd_1 (&d_copy, odd);
        }

      t1 = gen_reg_rtx (V8SImode);
      t2 = gen_reg_rtx (V8SImode);

      /* Shuffle the lanes around into
         { 0 1 2 3 8 9 a b } and { 4 5 6 7 c d e f }.  */
      emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, t1),
                                    gen_lowpart (V4DImode, d->op0),
                                    gen_lowpart (V4DImode, d->op1),
                                    GEN_INT (0x20)));
      emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, t2),
                                    gen_lowpart (V4DImode, d->op0),
                                    gen_lowpart (V4DImode, d->op1),
                                    GEN_INT (0x31)));

      /* Swap the 2nd and 3rd position in each lane into
         { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }.  */
      emit_insn (gen_avx2_pshufdv3 (t1, t1,
                                    GEN_INT (2 * 4 + 1 * 16 + 3 * 64)));
      emit_insn (gen_avx2_pshufdv3 (t2, t2,
                                    GEN_INT (2 * 4 + 1 * 16 + 3 * 64)));

      /* Now a vpunpck[lh]qdq will produce
         { 0 2 4 6 8 a c e } resp. { 1 3 5 7 9 b d f }.  */
      if (odd)
        t3 = gen_avx2_interleave_highv4di (gen_lowpart (V4DImode, d->target),
                                           gen_lowpart (V4DImode, t1),
                                           gen_lowpart (V4DImode, t2));
      else
        t3 = gen_avx2_interleave_lowv4di (gen_lowpart (V4DImode, d->target),
                                          gen_lowpart (V4DImode, t1),
                                          gen_lowpart (V4DImode, t2));
      emit_insn (t3);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
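
/* The two shufps immediates used in the V8SFmode case encode the
   even/odd selectors directly: 0x88 is 2020 in base four, i.e. the
   per-lane selectors { 0, 2, 0, 2 } from low to high, picking the
   even elements of each source, while 0xdd is 3131 in base four,
   i.e. { 1, 3, 1, 3 }, picking the odd elements.  */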
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Pattern match
   extract-even and extract-odd permutations.  */

static bool
expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
{
  unsigned i, odd, nelt = d->nelt;

  odd = d->perm[0];
  if (odd != 0 && odd != 1)
    return false;

  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != 2 * i + odd)
      return false;

  return expand_vec_perm_even_odd_1 (d, odd);
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Implement broadcast
   permutations.  We assume that expand_vec_perm_1 has already failed.  */

static bool
expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
{
  unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
  enum machine_mode vmode = d->vmode;
  unsigned char perm2[4];
  rtx op0 = d->op0;
  bool ok;

  switch (vmode)
    {
    case V4DFmode:
    case V8SFmode:
      /* These are special-cased in sse.md so that we can optionally
         use the vbroadcast instruction.  They expand to two insns
         if the input happens to be in a register.  */
      gcc_unreachable ();

    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      /* These are always implementable using standard shuffle patterns.  */
      gcc_unreachable ();

    case V8HImode:
    case V16QImode:
      /* These can be implemented via interleave.  We save one insn by
         stopping once we have promoted to V4SImode and then use pshufd.  */
      do
        {
          rtx dest;
          rtx (*gen) (rtx, rtx, rtx)
            = vmode == V16QImode ? gen_vec_interleave_lowv16qi
                                 : gen_vec_interleave_lowv8hi;

          if (elt >= nelt2)
            {
              gen = vmode == V16QImode ? gen_vec_interleave_highv16qi
                                       : gen_vec_interleave_highv8hi;
              elt -= nelt2;
            }
          nelt2 /= 2;

          dest = gen_reg_rtx (vmode);
          emit_insn (gen (dest, op0, op0));
          vmode = get_mode_wider_vector (vmode);
          op0 = gen_lowpart (vmode, dest);
        }
      while (vmode != V4SImode);

      memset (perm2, elt, 4);
      ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4,
                           d->testing_p);
      gcc_assert (ok);
      return true;

    case V32QImode:
    case V16HImode:
    case V8SImode:
    case V4DImode:
      /* For AVX2 broadcasts of the first element vpbroadcast* or
         vpermq should be used by expand_vec_perm_1.  */
      gcc_assert (!TARGET_AVX2 || d->perm[0]);
      return false;

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Pattern match
   broadcast permutations.  */

static bool
expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
{
  unsigned i, elt, nelt = d->nelt;

  if (!d->one_operand_p)
    return false;

  elt = d->perm[0];
  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != elt)
      return false;

  return expand_vec_perm_broadcast_1 (d);
}
/* Implement arbitrary permutation of two V32QImode and V16QImode operands
   with 4 vpshufb insns, 2 vpermq and 3 vpor.  We should have already failed
   all the shorter instruction sequences.  */

static bool
expand_vec_perm_vpshufb4_vpermq2 (struct expand_vec_perm_d *d)
{
  rtx rperm[4][32], vperm, l[2], h[2], op, m128;
  unsigned int i, nelt, eltsz;
  bool used[4];

  if (!TARGET_AVX2
      || d->one_operand_p
      || (d->vmode != V32QImode && d->vmode != V16HImode))
    return false;

  if (d->testing_p)
    return true;

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate 4 permutation masks.  If the required element is within
     the same lane, it is shuffled in.  If the required element from the
     other lane, force a zero by setting bit 7 in the permutation mask.
     In the other mask the mask has non-negative elements if element
     is requested from the other lane, but also moved to the other lane,
     so that the result of vpshufb can have the two V2TImode halves
     swapped.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < 32; ++i)
    {
      rperm[0][i] = m128;
      rperm[1][i] = m128;
      rperm[2][i] = m128;
      rperm[3][i] = m128;
    }
  used[0] = false;
  used[1] = false;
  used[2] = false;
  used[3] = false;
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i] & (nelt / 2 - 1);
      unsigned xlane = ((d->perm[i] ^ i) & (nelt / 2)) * eltsz;
      unsigned int which = ((d->perm[i] & nelt) ? 2 : 0) + (xlane ? 1 : 0);

      for (j = 0; j < eltsz; ++j)
        rperm[which][(i * eltsz + j) ^ xlane] = GEN_INT (e * eltsz + j);
      used[which] = true;
    }

  for (i = 0; i < 2; ++i)
    {
      if (!used[2 * i + 1])
        {
          h[i] = NULL_RTX;
          continue;
        }
      vperm = gen_rtx_CONST_VECTOR (V32QImode,
                                    gen_rtvec_v (32, rperm[2 * i + 1]));
      vperm = force_reg (V32QImode, vperm);
      h[i] = gen_reg_rtx (V32QImode);
      op = gen_lowpart (V32QImode, i ? d->op1 : d->op0);
      emit_insn (gen_avx2_pshufbv32qi3 (h[i], op, vperm));
    }

  /* Swap the 128-bit lanes of h[X].  */
  for (i = 0; i < 2; ++i)
    {
      if (h[i] == NULL_RTX)
        continue;
      op = gen_reg_rtx (V4DImode);
      emit_insn (gen_avx2_permv4di_1 (op, gen_lowpart (V4DImode, h[i]),
                                      const2_rtx, GEN_INT (3), const0_rtx,
                                      const1_rtx));
      h[i] = gen_lowpart (V32QImode, op);
    }

  for (i = 0; i < 2; ++i)
    {
      if (!used[2 * i])
        {
          l[i] = NULL_RTX;
          continue;
        }
      vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[2 * i]));
      vperm = force_reg (V32QImode, vperm);
      l[i] = gen_reg_rtx (V32QImode);
      op = gen_lowpart (V32QImode, i ? d->op1 : d->op0);
      emit_insn (gen_avx2_pshufbv32qi3 (l[i], op, vperm));
    }

  for (i = 0; i < 2; ++i)
    {
      if (h[i] && l[i])
        {
          op = gen_reg_rtx (V32QImode);
          emit_insn (gen_iorv32qi3 (op, l[i], h[i]));
          l[i] = op;
        }
      else if (h[i])
        l[i] = h[i];
    }

  gcc_assert (l[0] && l[1]);
  op = gen_lowpart (V32QImode, d->target);
  emit_insn (gen_iorv32qi3 (op, l[0], l[1]));
  return true;
}
/* The guts of ix86_expand_vec_perm_const, also used by the ok hook.
   With all of the interface bits taken care of, perform the expansion
   in D and return true on success.  */

static bool
ix86_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  /* Try a single instruction expansion.  */
  if (expand_vec_perm_1 (d))
    return true;

  /* Try sequences of two instructions.  */

  if (expand_vec_perm_pshuflw_pshufhw (d))
    return true;

  if (expand_vec_perm_palignr (d))
    return true;

  if (expand_vec_perm_interleave2 (d))
    return true;

  if (expand_vec_perm_broadcast (d))
    return true;

  if (expand_vec_perm_vpermq_perm_1 (d))
    return true;

  if (expand_vec_perm_vperm2f128 (d))
    return true;

  /* Try sequences of three instructions.  */

  if (expand_vec_perm_2vperm2f128_vshuf (d))
    return true;

  if (expand_vec_perm_pshufb2 (d))
    return true;

  if (expand_vec_perm_interleave3 (d))
    return true;

  if (expand_vec_perm_vperm2f128_vblend (d))
    return true;

  /* Try sequences of four instructions.  */

  if (expand_vec_perm_vpshufb2_vpermq (d))
    return true;

  if (expand_vec_perm_vpshufb2_vpermq_even_odd (d))
    return true;

  /* ??? Look for narrow permutations whose element orderings would
     allow the promotion to a wider mode.  */

  /* ??? Look for sequences of interleave or a wider permute that place
     the data into the correct lanes for a half-vector shuffle like
     pshuf[lh]w or vpermilps.  */

  /* ??? Look for sequences of interleave that produce the desired results.
     The combinatorics of punpck[lh] get pretty ugly... */

  if (expand_vec_perm_even_odd (d))
    return true;

  /* Even longer sequences.  */
  if (expand_vec_perm_vpshufb4_vpermq2 (d))
    return true;

  return false;
}
/* If a permutation only uses one operand, make it clear.  Returns true
   if the permutation references both operands.  */

static bool
canonicalize_perm (struct expand_vec_perm_d *d)
{
  int i, which, nelt = d->nelt;

  for (i = which = 0; i < nelt; ++i)
    which |= (d->perm[i] < nelt ? 1 : 2);

  d->one_operand_p = true;
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      if (!rtx_equal_p (d->op0, d->op1))
        {
          d->one_operand_p = false;
          break;
        }
      /* The elements of PERM do not suggest that only the first operand
         is used, but both operands are identical.  Allow easier matching
         of the permutation by folding the permutation into the single
         input vector.  */
      /* FALLTHRU */

    case 2:
      for (i = 0; i < nelt; ++i)
        d->perm[i] &= nelt - 1;
      d->op0 = d->op1;
      break;

    case 1:
      d->op1 = d->op0;
      break;
    }

  return (which == 3);
}
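
/* For instance, with identical operands and the V4SImode selector
   { 0, 5, 2, 7 }, WHICH is 3 but rtx_equal_p (d->op0, d->op1) holds,
   so the selector is folded to { 0, 1, 2, 3 } with one_operand_p set,
   which expand_vec_perm_1 then recognizes as a plain identity move.  */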
bool
ix86_expand_vec_perm_const (rtx operands[4])
{
  struct expand_vec_perm_d d;
  unsigned char perm[MAX_VECT_LEN];
  int i, nelt;
  bool two_args;
  rtx sel;

  d.target = operands[0];
  d.op0 = operands[1];
  d.op1 = operands[2];
  sel = operands[3];

  d.vmode = GET_MODE (d.target);
  gcc_assert (VECTOR_MODE_P (d.vmode));
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  gcc_assert (GET_CODE (sel) == CONST_VECTOR);
  gcc_assert (XVECLEN (sel, 0) == nelt);
  gcc_checking_assert (sizeof (d.perm) == sizeof (perm));

  for (i = 0; i < nelt; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      int ei = INTVAL (e) & (2 * nelt - 1);
      d.perm[i] = ei;
      perm[i] = ei;
    }

  two_args = canonicalize_perm (&d);

  if (ix86_expand_vec_perm_const_1 (&d))
    return true;

  /* If the selector says both arguments are needed, but the operands are the
     same, the above tried to expand with one_operand_p and flattened selector.
     If that didn't work, retry without one_operand_p; we succeeded with that
     during testing.  */
  if (two_args && d.one_operand_p)
    {
      d.one_operand_p = false;
      memcpy (d.perm, perm, sizeof (perm));
      return ix86_expand_vec_perm_const_1 (&d);
    }

  return false;
}
40485 ix86_vectorize_vec_perm_const_ok (enum machine_mode vmode
,
40486 const unsigned char *sel
)
40488 struct expand_vec_perm_d d
;
40489 unsigned int i
, nelt
, which
;
40493 d
.nelt
= nelt
= GET_MODE_NUNITS (d
.vmode
);
40494 d
.testing_p
= true;
40496 /* Given sufficient ISA support we can just return true here
40497 for selected vector modes. */
40498 if (GET_MODE_SIZE (d
.vmode
) == 16)
40500 /* All implementable with a single vpperm insn. */
40503 /* All implementable with 2 pshufb + 1 ior. */
40506 /* All implementable with shufpd or unpck[lh]pd. */
40511 /* Extract the values from the vector CST into the permutation
40513 memcpy (d
.perm
, sel
, nelt
);
40514 for (i
= which
= 0; i
< nelt
; ++i
)
40516 unsigned char e
= d
.perm
[i
];
40517 gcc_assert (e
< 2 * nelt
);
40518 which
|= (e
< nelt
? 1 : 2);
40521 /* For all elements from second vector, fold the elements to first. */
40523 for (i
= 0; i
< nelt
; ++i
)
40526 /* Check whether the mask can be applied to the vector type. */
40527 d
.one_operand_p
= (which
!= 3);
40529 /* Implementable with shufps or pshufd. */
40530 if (d
.one_operand_p
&& (d
.vmode
== V4SFmode
|| d
.vmode
== V4SImode
))
40533 /* Otherwise we have to go through the motions and see if we can
40534 figure out how to generate the requested permutation. */
40535 d
.target
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 1);
40536 d
.op1
= d
.op0
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 2);
40537 if (!d
.one_operand_p
)
40538 d
.op1
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 3);
40541 ret
= ix86_expand_vec_perm_const_1 (&d
);
void
ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.one_operand_p = false;
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  /* We'll either be able to implement the permutation directly...  */
  if (expand_vec_perm_1 (&d))
    return;

  /* ... or we use the special-case patterns.  */
  expand_vec_perm_even_odd_1 (&d, odd);
}
static void
ix86_expand_vec_interleave (rtx targ, rtx op0, rtx op1, bool high_p)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt, base;
  bool ok;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.one_operand_p = false;
  d.testing_p = false;

  base = high_p ? nelt / 2 : 0;
  for (i = 0; i < nelt / 2; ++i)
    {
      d.perm[i * 2] = i + base;
      d.perm[i * 2 + 1] = i + base + nelt;
    }

  /* Note that for AVX this isn't one instruction.  */
  ok = ix86_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
/* Expand a vector operation CODE for a V*QImode in terms of the
   same operation on V*HImode.  */

void
ix86_expand_vecop_qihi (enum rtx_code code, rtx dest, rtx op1, rtx op2)
{
  enum machine_mode qimode = GET_MODE (dest);
  enum machine_mode himode;
  rtx (*gen_il) (rtx, rtx, rtx);
  rtx (*gen_ih) (rtx, rtx, rtx);
  rtx op1_l, op1_h, op2_l, op2_h, res_l, res_h;
  struct expand_vec_perm_d d;
  bool ok, full_interleave;
  bool uns_p = false;
  int i;

  switch (qimode)
    {
    case V16QImode:
      himode = V8HImode;
      gen_il = gen_vec_interleave_lowv16qi;
      gen_ih = gen_vec_interleave_highv16qi;
      break;
    case V32QImode:
      himode = V16HImode;
      gen_il = gen_avx2_interleave_lowv32qi;
      gen_ih = gen_avx2_interleave_highv32qi;
      break;
    default:
      gcc_unreachable ();
    }

  op2_l = op2_h = op2;
  switch (code)
    {
    case MULT:
      /* Unpack data such that we've got a source byte in each low byte of
         each word.  We don't care what goes into the high byte of each word.
         Rather than trying to get zero in there, most convenient is to let
         it be a copy of the low byte.  */
      op2_l = gen_reg_rtx (qimode);
      op2_h = gen_reg_rtx (qimode);
      emit_insn (gen_il (op2_l, op2, op2));
      emit_insn (gen_ih (op2_h, op2, op2));

      op1_l = gen_reg_rtx (qimode);
      op1_h = gen_reg_rtx (qimode);
      emit_insn (gen_il (op1_l, op1, op1));
      emit_insn (gen_ih (op1_h, op1, op1));
      full_interleave = qimode == V16QImode;
      break;

    case ASHIFT:
    case LSHIFTRT:
      uns_p = true;
      /* FALLTHRU */
    case ASHIFTRT:
      op1_l = gen_reg_rtx (himode);
      op1_h = gen_reg_rtx (himode);
      ix86_expand_sse_unpack (op1_l, op1, uns_p, false);
      ix86_expand_sse_unpack (op1_h, op1, uns_p, true);
      full_interleave = true;
      break;

    default:
      gcc_unreachable ();
    }

  /* Perform the operation.  */
  res_l = expand_simple_binop (himode, code, op1_l, op2_l, NULL_RTX,
                               1, OPTAB_DIRECT);
  res_h = expand_simple_binop (himode, code, op1_h, op2_h, NULL_RTX,
                               1, OPTAB_DIRECT);
  gcc_assert (res_l && res_h);

  /* Merge the data back into the right place.  */
  d.target = dest;
  d.op0 = gen_lowpart (qimode, res_l);
  d.op1 = gen_lowpart (qimode, res_h);
  d.vmode = qimode;
  d.nelt = GET_MODE_NUNITS (qimode);
  d.one_operand_p = false;
  d.testing_p = false;

  if (full_interleave)
    {
      /* For SSE2, we used a full interleave, so the desired
         results are in the even elements.  */
      for (i = 0; i < 32; ++i)
        d.perm[i] = i * 2;
    }
  else
    {
      /* For AVX, the interleave used above was not cross-lane.  So the
         extraction is evens but with the second and third quarter swapped.
         Happily, that is even one insn shorter than even extraction.  */
      for (i = 0; i < 32; ++i)
        d.perm[i] = i * 2 + ((i & 24) == 8 ? 16 : (i & 24) == 16 ? -16 : 0);
    }

  ok = ix86_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);

  set_unique_reg_note (get_last_insn (), REG_EQUAL,
                       gen_rtx_fmt_ee (code, qimode, op1, op2));
}
void
ix86_expand_mul_widen_evenodd (rtx dest, rtx op1, rtx op2,
                               bool uns_p, bool odd_p)
{
  enum machine_mode mode = GET_MODE (op1);
  enum machine_mode wmode = GET_MODE (dest);
  rtx x;

  /* We only play even/odd games with vectors of SImode.  */
  gcc_assert (mode == V4SImode || mode == V8SImode);

  /* If we're looking for the odd results, shift those members down to
     the even slots.  For some cpus this is faster than a PSHUFD.  */
  if (odd_p)
    {
      if (TARGET_XOP && mode == V4SImode)
        {
          x = force_reg (wmode, CONST0_RTX (wmode));
          emit_insn (gen_xop_pmacsdqh (dest, op1, op2, x));
          return;
        }

      x = GEN_INT (GET_MODE_UNIT_BITSIZE (mode));
      op1 = expand_binop (wmode, lshr_optab, gen_lowpart (wmode, op1),
                          x, NULL, 1, OPTAB_DIRECT);
      op2 = expand_binop (wmode, lshr_optab, gen_lowpart (wmode, op2),
                          x, NULL, 1, OPTAB_DIRECT);
      op1 = gen_lowpart (mode, op1);
      op2 = gen_lowpart (mode, op2);
    }

  if (mode == V8SImode)
    {
      if (uns_p)
        x = gen_vec_widen_umult_even_v8si (dest, op1, op2);
      else
        x = gen_vec_widen_smult_even_v8si (dest, op1, op2);
    }
  else if (uns_p)
    x = gen_vec_widen_umult_even_v4si (dest, op1, op2);
  else if (TARGET_SSE4_1)
    x = gen_sse4_1_mulv2siv2di3 (dest, op1, op2);
  else
    {
      rtx s1, s2, t0, t1, t2;

      /* The easiest way to implement this without PMULDQ is to go through
         the motions as if we are performing a full 64-bit multiply.  With
         the exception that we need to do less shuffling of the elements.  */

      /* Compute the sign-extension, aka highparts, of the two operands.  */
      s1 = ix86_expand_sse_cmp (gen_reg_rtx (mode), GT, CONST0_RTX (mode),
                                op1, pc_rtx, pc_rtx);
      s2 = ix86_expand_sse_cmp (gen_reg_rtx (mode), GT, CONST0_RTX (mode),
                                op2, pc_rtx, pc_rtx);

      /* Multiply LO(A) * HI(B), and vice-versa.  */
      t1 = gen_reg_rtx (wmode);
      t2 = gen_reg_rtx (wmode);
      emit_insn (gen_vec_widen_umult_even_v4si (t1, s1, op2));
      emit_insn (gen_vec_widen_umult_even_v4si (t2, s2, op1));

      /* Multiply LO(A) * LO(B).  */
      t0 = gen_reg_rtx (wmode);
      emit_insn (gen_vec_widen_umult_even_v4si (t0, op1, op2));

      /* Combine and shift the highparts into place.  */
      t1 = expand_binop (wmode, add_optab, t1, t2, t1, 1, OPTAB_DIRECT);
      t1 = expand_binop (wmode, ashl_optab, t1, GEN_INT (32), t1,
                         1, OPTAB_DIRECT);

      /* Combine high and low parts.  */
      force_expand_binop (wmode, add_optab, t0, t1, dest, 1, OPTAB_DIRECT);
      return;
    }
  emit_insn (x);
}
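
/* A small sanity check of the PMULDQ-less path above: for a == -2 and
   b == 3 the unsigned even multiply gives (2^32 - 2) * 3, the highpart
   correction adds ((-1 * 3) mod 2^32) << 32, and the sum is -6 modulo
   2^64, exactly the signed widening product.  */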
void
ix86_expand_mul_widen_hilo (rtx dest, rtx op1, rtx op2,
                            bool uns_p, bool high_p)
{
  enum machine_mode wmode = GET_MODE (dest);
  enum machine_mode mode = GET_MODE (op1);
  rtx t1, t2, t3, t4, mask;

  switch (mode)
    {
    case V4SImode:
      t1 = gen_reg_rtx (mode);
      t2 = gen_reg_rtx (mode);
      if (TARGET_XOP && !uns_p)
        {
          /* With XOP, we have pmacsdqh, aka mul_widen_odd.  In this case,
             shuffle the elements once so that all elements are in the right
             place for immediate use: { A C B D }.  */
          emit_insn (gen_sse2_pshufd_1 (t1, op1, const0_rtx, const2_rtx,
                                        const1_rtx, GEN_INT (3)));
          emit_insn (gen_sse2_pshufd_1 (t2, op2, const0_rtx, const2_rtx,
                                        const1_rtx, GEN_INT (3)));
        }
      else
        {
          /* Put the elements into place for the multiply.  */
          ix86_expand_vec_interleave (t1, op1, op1, high_p);
          ix86_expand_vec_interleave (t2, op2, op2, high_p);
          high_p = false;
        }
      ix86_expand_mul_widen_evenodd (dest, t1, t2, uns_p, high_p);
      break;

    case V8SImode:
      /* Shuffle the elements between the lanes.  After this we
         have { A B E F | C D G H } for each operand.  */
      t1 = gen_reg_rtx (V4DImode);
      t2 = gen_reg_rtx (V4DImode);
      emit_insn (gen_avx2_permv4di_1 (t1, gen_lowpart (V4DImode, op1),
                                      const0_rtx, const2_rtx,
                                      const1_rtx, GEN_INT (3)));
      emit_insn (gen_avx2_permv4di_1 (t2, gen_lowpart (V4DImode, op2),
                                      const0_rtx, const2_rtx,
                                      const1_rtx, GEN_INT (3)));

      /* Shuffle the elements within the lanes.  After this we
         have { A A B B | C C D D } or { E E F F | G G H H }.  */
      t3 = gen_reg_rtx (V8SImode);
      t4 = gen_reg_rtx (V8SImode);
      mask = GEN_INT (high_p
                      ? 2 + (2 << 2) + (3 << 4) + (3 << 6)
                      : 0 + (0 << 2) + (1 << 4) + (1 << 6));
      emit_insn (gen_avx2_pshufdv3 (t3, gen_lowpart (V8SImode, t1), mask));
      emit_insn (gen_avx2_pshufdv3 (t4, gen_lowpart (V8SImode, t2), mask));

      ix86_expand_mul_widen_evenodd (dest, t3, t4, uns_p, false);
      break;

    case V8HImode:
    case V16HImode:
      t1 = expand_binop (mode, smul_optab, op1, op2, NULL_RTX,
                         uns_p, OPTAB_DIRECT);
      t2 = expand_binop (mode,
                         uns_p ? umul_highpart_optab : smul_highpart_optab,
                         op1, op2, NULL_RTX, uns_p, OPTAB_DIRECT);
      gcc_assert (t1 && t2);

      ix86_expand_vec_interleave (gen_lowpart (mode, dest), t1, t2, high_p);
      break;

    case V16QImode:
    case V32QImode:
      t1 = gen_reg_rtx (wmode);
      t2 = gen_reg_rtx (wmode);
      ix86_expand_sse_unpack (t1, op1, uns_p, high_p);
      ix86_expand_sse_unpack (t2, op2, uns_p, high_p);

      emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_MULT (wmode, t1, t2)));
      break;

    default:
      gcc_unreachable ();
    }
}
void
ix86_expand_sse2_mulv4si3 (rtx op0, rtx op1, rtx op2)
{
  rtx res_1, res_2;

  res_1 = gen_reg_rtx (V4SImode);
  res_2 = gen_reg_rtx (V4SImode);
  ix86_expand_mul_widen_evenodd (gen_lowpart (V2DImode, res_1),
				 op1, op2, true, false);
  ix86_expand_mul_widen_evenodd (gen_lowpart (V2DImode, res_2),
				 op1, op2, true, true);

  /* Move the results in element 2 down to element 1; we don't care
     what goes in elements 2 and 3.  Then we can merge the parts
     back together with an interleave.

     Note that two other sequences were tried:
     (1) Use interleaves at the start instead of psrldq, which allows
     us to use a single shufps to merge things back at the end.
     (2) Use shufps here to combine the two vectors, then pshufd to
     put the elements in the correct order.
     In both cases the cost of the reformatting stall was too high
     and the overall sequence slower.  */

  emit_insn (gen_sse2_pshufd_1 (res_1, res_1, const0_rtx, const2_rtx,
				const0_rtx, const0_rtx));
  emit_insn (gen_sse2_pshufd_1 (res_2, res_2, const0_rtx, const2_rtx,
				const0_rtx, const0_rtx));
  res_1 = emit_insn (gen_vec_interleave_lowv4si (op0, res_1, res_2));

  set_unique_reg_note (res_1, REG_EQUAL, gen_rtx_MULT (V4SImode, op1, op2));
}
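/* Scalar model (illustration only, not part of GCC; assumes C99
   <stdint.h>) of the expansion above: the low 32 bits of a product do
   not depend on signedness, so each lane is the low half of an
   unsigned widening multiply, taken from the even and odd lanes.  */
#if 0
#include <stdint.h>

static void
mulv4si_model (uint32_t r[4], const uint32_t a[4], const uint32_t b[4])
{
  uint64_t even0 = (uint64_t) a[0] * b[0];  /* pmuludq on lanes 0/2 */
  uint64_t even2 = (uint64_t) a[2] * b[2];
  uint64_t odd1  = (uint64_t) a[1] * b[1];  /* pmuludq on shifted input */
  uint64_t odd3  = (uint64_t) a[3] * b[3];

  /* The pshufd/punpckldq pair merges the four low halves in order.  */
  r[0] = (uint32_t) even0;
  r[1] = (uint32_t) odd1;
  r[2] = (uint32_t) even2;
  r[3] = (uint32_t) odd3;
}
#endif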
void
ix86_expand_sse2_mulvxdi3 (rtx op0, rtx op1, rtx op2)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx t1, t2, t3, t4, t5, t6;

  if (TARGET_XOP && mode == V2DImode)
    {
      /* op1: A,B,C,D, op2: E,F,G,H */
      op1 = gen_lowpart (V4SImode, op1);
      op2 = gen_lowpart (V4SImode, op2);

      t1 = gen_reg_rtx (V4SImode);
      t2 = gen_reg_rtx (V4SImode);
      t3 = gen_reg_rtx (V2DImode);
      t4 = gen_reg_rtx (V2DImode);

      /* t1: B,A,D,C */
      emit_insn (gen_sse2_pshufd_1 (t1, op1,
				    GEN_INT (1),
				    GEN_INT (0),
				    GEN_INT (3),
				    GEN_INT (2)));

      /* t2: (B*E),(A*F),(D*G),(C*H) */
      emit_insn (gen_mulv4si3 (t2, t1, op2));

      /* t3: (B*E)+(A*F), (D*G)+(C*H) */
      emit_insn (gen_xop_phadddq (t3, t2));

      /* t4: ((B*E)+(A*F))<<32, ((D*G)+(C*H))<<32 */
      emit_insn (gen_ashlv2di3 (t4, t3, GEN_INT (32)));

      /* op0: (((B*E)+(A*F))<<32)+(B*F), (((D*G)+(C*H))<<32)+(D*H) */
      emit_insn (gen_xop_pmacsdql (op0, op1, op2, t4));
    }
  else
    {
      enum machine_mode nmode;
      rtx (*umul) (rtx, rtx, rtx);

      if (mode == V2DImode)
	{
	  umul = gen_vec_widen_umult_even_v4si;
	  nmode = V4SImode;
	}
      else if (mode == V4DImode)
	{
	  umul = gen_vec_widen_umult_even_v8si;
	  nmode = V8SImode;
	}
      else
	gcc_unreachable ();

      /* Multiply low parts.  */
      t1 = gen_reg_rtx (mode);
      emit_insn (umul (t1, gen_lowpart (nmode, op1), gen_lowpart (nmode, op2)));

      /* Shift input vectors right 32 bits so we can multiply high parts.  */
      t6 = GEN_INT (32);
      t2 = expand_binop (mode, lshr_optab, op1, t6, NULL, 1, OPTAB_DIRECT);
      t3 = expand_binop (mode, lshr_optab, op2, t6, NULL, 1, OPTAB_DIRECT);

      /* Multiply high parts by low parts.  */
      t4 = gen_reg_rtx (mode);
      t5 = gen_reg_rtx (mode);
      emit_insn (umul (t4, gen_lowpart (nmode, t2), gen_lowpart (nmode, op2)));
      emit_insn (umul (t5, gen_lowpart (nmode, t3), gen_lowpart (nmode, op1)));

      /* Combine and shift the highparts back.  */
      t4 = expand_binop (mode, add_optab, t4, t5, t4, 1, OPTAB_DIRECT);
      t4 = expand_binop (mode, ashl_optab, t4, t6, t4, 1, OPTAB_DIRECT);

      /* Combine high and low parts.  */
      force_expand_binop (mode, add_optab, t1, t4, op0, 1, OPTAB_DIRECT);
    }

  set_unique_reg_note (get_last_insn (), REG_EQUAL,
		       gen_rtx_MULT (mode, op1, op2));
}
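/* Per-lane scalar model (illustration only, not part of GCC; assumes
   C99 <stdint.h>) of the generic path above: a 64x64 multiply modulo
   2^64 from three 32x32->64 unsigned products; hi(a)*hi(b) would only
   feed bits above 63 and is dropped.  */
#if 0
#include <stdint.h>

static uint64_t
mulv2di_lane_model (uint64_t a, uint64_t b)
{
  uint32_t alo = (uint32_t) a, ahi = (uint32_t) (a >> 32);
  uint32_t blo = (uint32_t) b, bhi = (uint32_t) (b >> 32);

  uint64_t lolo = (uint64_t) alo * blo;  /* pmuludq              */
  uint64_t hilo = (uint64_t) ahi * blo;  /* pmuludq after psrlq  */
  uint64_t lohi = (uint64_t) alo * bhi;

  return lolo + ((hilo + lohi) << 32);
}
#endif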
/* Expand an insert into a vector register through pinsr insn.
   Return true if successful.  */

bool
ix86_expand_pinsr (rtx *operands)
{
  rtx dst = operands[0];
  rtx src = operands[3];

  unsigned int size = INTVAL (operands[1]);
  unsigned int pos = INTVAL (operands[2]);

  if (GET_CODE (dst) == SUBREG)
    {
      pos += SUBREG_BYTE (dst) * BITS_PER_UNIT;
      dst = SUBREG_REG (dst);
    }

  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);

  switch (GET_MODE (dst))
    {
    case V16QImode:
    case V8HImode:
    case V4SImode:
    case V2DImode:
      {
	enum machine_mode srcmode, dstmode;
	rtx (*pinsr)(rtx, rtx, rtx, rtx);

	srcmode = mode_for_size (size, MODE_INT, 0);

	switch (srcmode)
	  {
	  case QImode:
	    if (!TARGET_SSE4_1)
	      return false;
	    dstmode = V16QImode;
	    pinsr = gen_sse4_1_pinsrb;
	    break;

	  case HImode:
	    dstmode = V8HImode;
	    pinsr = gen_sse2_pinsrw;
	    break;

	  case SImode:
	    if (!TARGET_SSE4_1)
	      return false;
	    dstmode = V4SImode;
	    pinsr = gen_sse4_1_pinsrd;
	    break;

	  case DImode:
	    gcc_assert (TARGET_64BIT);
	    if (!TARGET_SSE4_1)
	      return false;
	    dstmode = V2DImode;
	    pinsr = gen_sse4_1_pinsrq;
	    break;

	  default:
	    return false;
	  }

	dst = gen_lowpart (dstmode, dst);
	src = gen_lowpart (srcmode, src);

	pos /= size;

	emit_insn (pinsr (dst, dst, src, GEN_INT (1 << pos)));
	return true;
      }

    default:
      return false;
    }
}
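/* For orientation (illustration only, not part of GCC; assumes SSE4.1
   and <smmintrin.h>): the insn emitted above behaves like the pinsrd
   intrinsic, replacing one selected lane and keeping the rest.  */
#if 0
#include <smmintrin.h>

/* Insert the 32-bit value X into lane 2 of V, as pinsrd does.  */
static __m128i
insert_lane2_model (__m128i v, int x)
{
  return _mm_insert_epi32 (v, x, 2);
}
#endif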
/* This function returns the calling abi specific va_list type node.
   It returns the FNDECL specific va_list type.  */

static tree
ix86_fn_abi_va_list (tree fndecl)
{
  if (!TARGET_64BIT)
    return va_list_type_node;
  gcc_assert (fndecl != NULL_TREE);

  if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
    return ms_va_list_type_node;
  else
    return sysv_va_list_type_node;
}
/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */

static tree
ix86_canonical_va_list_type (tree type)
{
  tree wtype, htype;

  /* Resolve references and pointers to va_list type.  */
  if (TREE_CODE (type) == MEM_REF)
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  if (TARGET_64BIT && va_list_type_node != NULL_TREE)
    {
      wtype = va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return va_list_type_node;
      wtype = sysv_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return sysv_va_list_type_node;
      wtype = ms_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return ms_va_list_type_node;
      return NULL_TREE;
    }

  return std_canonical_va_list_type (type);
}
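/* Background example (user code, illustration only; assumes C99 and
   <stdarg.h>): on 64-bit SysV, va_list is a one-element array type, so
   passing it to a helper decays it to a pointer.  The unwrapping above
   is what lets the decayed and the original form compare as the same
   canonical va_list.  */
#if 0
#include <stdarg.h>

static int
first_int (va_list ap)	/* array parameter decays to a pointer here */
{
  return va_arg (ap, int);
}
#endif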
/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the result type of
   the va_list builtin, and *PNAME to its internal type.
   Returns zero if there is no element for this index, otherwise
   IDX should be increased upon the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (TARGET_64BIT)
    {
      switch (idx)
	{
	default:
	  break;

	case 0:
	  *ptree = ms_va_list_type_node;
	  *pname = "__builtin_ms_va_list";
	  return 1;

	case 1:
	  *ptree = sysv_va_list_type_node;
	  *pname = "__builtin_sysv_va_list";
	  return 1;
	}
    }

  return 0;
}
#undef TARGET_SCHED_DISPATCH
#define TARGET_SCHED_DISPATCH has_dispatch
#undef TARGET_SCHED_DISPATCH_DO
#define TARGET_SCHED_DISPATCH_DO do_dispatch
#undef TARGET_SCHED_REASSOCIATION_WIDTH
#define TARGET_SCHED_REASSOCIATION_WIDTH ix86_reassociation_width
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ix86_sched_reorder
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY ix86_adjust_priority
#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ix86_dependencies_evaluation_hook

/* The size of the dispatch window is the total number of bytes of
   object code allowed in a window.  */
#define DISPATCH_WINDOW_SIZE 16

/* Number of dispatch windows considered for scheduling.  */
#define MAX_DISPATCH_WINDOWS 3

/* Maximum number of instructions in a window.  */
#define MAX_INSN 4

/* Maximum number of immediate operands in a window.  */
#define MAX_IMM 4

/* Maximum number of immediate bits allowed in a window.  */
#define MAX_IMM_SIZE 128

/* Maximum number of 32 bit immediates allowed in a window.  */
#define MAX_IMM_32 4

/* Maximum number of 64 bit immediates allowed in a window.  */
#define MAX_IMM_64 2

/* Maximum total of loads or prefetches allowed in a window.  */
#define MAX_LOAD 2

/* Maximum total of stores allowed in a window.  */
#define MAX_STORE 1

#define BIG 100
/* Dispatch groups.  Instructions that affect the mix in a dispatch window.  */
enum dispatch_group
{
  disp_no_group = 0,
  disp_load,
  disp_store,
  disp_load_store,
  disp_prefetch,
  disp_imm,
  disp_imm_32,
  disp_imm_64,
  disp_branch,
  disp_cmp,
  disp_jcc,
  disp_last
};

/* Number of allowable groups in a dispatch window.  It is an array
   indexed by dispatch_group enum.  100 is used as a big number,
   because the number of these kind of operations does not have any
   effect in dispatch window, but we need them for other reasons in
   the table.  */
static unsigned int num_allowable_groups[disp_last] = {
  0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
};

char group_name[disp_last + 1][16] = {
  "disp_no_group", "disp_load", "disp_store", "disp_load_store",
  "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
  "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
};

/* Instruction path.  */
enum insn_path
{
  no_path = 0,
  path_single, /* Single micro op.  */
  path_double, /* Double micro op.  */
  path_multi,  /* Instructions with more than 2 micro ops.  */
  last_path
};

/* sched_insn_info defines a window to the instructions scheduled in
   the basic block.  It contains a pointer to the insn_info table and
   the instruction scheduled.

   Windows are allocated for each basic block and are linked
   together.  */
typedef struct sched_insn_info_s
{
  rtx insn;
  enum dispatch_group group;
  enum insn_path path;
  int byte_len;
  int imm_bytes;
} sched_insn_info;

/* Linked list of dispatch windows.  This is a two way list of
   dispatch windows of a basic block.  It contains information about
   the number of uops in the window and the total number of
   instructions and of bytes in the object code for this dispatch
   window.  */
typedef struct dispatch_windows_s
{
  int num_insn;            /* Number of insns in the window.  */
  int num_uops;            /* Number of uops in the window.  */
  int window_size;         /* Number of bytes in the window.  */
  int window_num;          /* Window number, 0 or 1.  */
  int num_imm;             /* Number of immediates in an insn.  */
  int num_imm_32;          /* Number of 32 bit immediates in an insn.  */
  int num_imm_64;          /* Number of 64 bit immediates in an insn.  */
  int imm_size;            /* Total immediates in the window.  */
  int num_loads;           /* Total memory loads in the window.  */
  int num_stores;          /* Total memory stores in the window.  */
  int violation;           /* Violation exists in window.  */
  sched_insn_info *window; /* Pointer to the window.  */
  struct dispatch_windows_s *next;
  struct dispatch_windows_s *prev;
} dispatch_windows;

/* Immediate values used in an insn.  */
typedef struct imm_info_s
{
  int imm;
  int imm32;
  int imm64;
} imm_info;

static dispatch_windows *dispatch_window_list;
static dispatch_windows *dispatch_window_list1;
/* Get dispatch group of insn.  */

static enum dispatch_group
get_mem_group (rtx insn)
{
  enum attr_memory memory;

  if (INSN_CODE (insn) < 0)
    return disp_no_group;
  memory = get_attr_memory (insn);
  if (memory == MEMORY_STORE)
    return disp_store;

  if (memory == MEMORY_LOAD)
    return disp_load;

  if (memory == MEMORY_BOTH)
    return disp_load_store;

  return disp_no_group;
}

/* Return true if insn is a compare instruction.  */

static bool
is_cmp (rtx insn)
{
  enum attr_type type;

  type = get_attr_type (insn);
  return (type == TYPE_TEST
	  || type == TYPE_ICMP
	  || type == TYPE_FCMP
	  || GET_CODE (PATTERN (insn)) == COMPARE);
}

/* Return true if a dispatch violation encountered.  */

static bool
dispatch_violation (void)
{
  if (dispatch_window_list->next)
    return dispatch_window_list->next->violation;
  return dispatch_window_list->violation;
}

/* Return true if insn is a branch instruction.  */

static bool
is_branch (rtx insn)
{
  return (CALL_P (insn) || JUMP_P (insn));
}

/* Return true if insn is a prefetch instruction.  */

static bool
is_prefetch (rtx insn)
{
  return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
}
/* This function initializes a dispatch window and the list container holding a
   pointer to the window.  */

static void
init_window (int window_num)
{
  int i;
  dispatch_windows *new_list;

  if (window_num == 0)
    new_list = dispatch_window_list;
  else
    new_list = dispatch_window_list1;

  new_list->num_insn = 0;
  new_list->num_uops = 0;
  new_list->window_size = 0;
  new_list->next = NULL;
  new_list->prev = NULL;
  new_list->window_num = window_num;
  new_list->num_imm = 0;
  new_list->num_imm_32 = 0;
  new_list->num_imm_64 = 0;
  new_list->imm_size = 0;
  new_list->num_loads = 0;
  new_list->num_stores = 0;
  new_list->violation = false;

  for (i = 0; i < MAX_INSN; i++)
    {
      new_list->window[i].insn = NULL;
      new_list->window[i].group = disp_no_group;
      new_list->window[i].path = no_path;
      new_list->window[i].byte_len = 0;
      new_list->window[i].imm_bytes = 0;
    }
}

/* This function allocates and initializes a dispatch window and the
   list container holding a pointer to the window.  */

static dispatch_windows *
allocate_window (void)
{
  dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
  new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);

  return new_list;
}

/* This routine initializes the dispatch scheduling information.  It
   initiates building dispatch scheduler tables and constructs the
   first dispatch window.  */

static void
init_dispatch_sched (void)
{
  /* Allocate a dispatch list and a window.  */
  dispatch_window_list = allocate_window ();
  dispatch_window_list1 = allocate_window ();
  init_window (0);
  init_window (1);
}
/* This function returns true if a branch is detected.  End of a basic block
   does not have to be a branch, but here we assume only branches end a
   window.  */

static bool
is_end_basic_block (enum dispatch_group group)
{
  return group == disp_branch;
}

/* This function is called when the end of a window processing is reached.  */

static void
process_end_window (void)
{
  gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
  if (dispatch_window_list->next)
    {
      gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
      gcc_assert (dispatch_window_list->window_size
		  + dispatch_window_list1->window_size <= 48);
      init_window (1);
    }
  init_window (0);
}

/* Allocates a new dispatch window and adds it to WINDOW_LIST.
   WINDOW_NUM is either 0 or 1.  A maximum of two windows are generated
   for 48 bytes of instructions.  Note that these windows are not dispatch
   windows whose sizes are DISPATCH_WINDOW_SIZE.  */

static dispatch_windows *
allocate_next_window (int window_num)
{
  if (window_num == 0)
    {
      if (dispatch_window_list->next)
	init_window (1);
      init_window (0);
      return dispatch_window_list;
    }

  dispatch_window_list->next = dispatch_window_list1;
  dispatch_window_list1->prev = dispatch_window_list;

  return dispatch_window_list1;
}
/* Increment the number of immediate operands of an instruction.  */

static int
find_constant_1 (rtx *in_rtx, imm_info *imm_values)
{
  if (*in_rtx == 0)
    return 0;

  switch (GET_CODE (*in_rtx))
    {
    case CONST:
    case SYMBOL_REF:
    case CONST_INT:
      (imm_values->imm)++;
      if (x86_64_immediate_operand (*in_rtx, SImode))
	(imm_values->imm32)++;
      else
	(imm_values->imm64)++;
      break;

    case CONST_DOUBLE:
      (imm_values->imm)++;
      (imm_values->imm64)++;
      break;

    case CODE_LABEL:
      if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
	{
	  (imm_values->imm)++;
	  (imm_values->imm32)++;
	}
      break;

    default:
      break;
    }

  return 0;
}
/* Compute number of immediate operands of an instruction.  */

static void
find_constant (rtx in_rtx, imm_info *imm_values)
{
  for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
		(rtx_function) find_constant_1, (void *) imm_values);
}

/* Return total size of immediate operands of an instruction along with number
   of corresponding immediate-operands.  It initializes its parameters to zero
   before calling FIND_CONSTANT.
   INSN is the input instruction.  IMM is the total of immediates.
   IMM32 is the number of 32 bit immediates.  IMM64 is the number of 64
   bit immediates.  */

static int
get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
{
  imm_info imm_values = {0, 0, 0};

  find_constant (insn, &imm_values);
  *imm = imm_values.imm;
  *imm32 = imm_values.imm32;
  *imm64 = imm_values.imm64;
  return imm_values.imm32 * 4 + imm_values.imm64 * 8;
}

/* This function indicates if an operand of an instruction is an
   immediate.  */

static bool
has_immediate (rtx insn)
{
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (insn)
    return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
			       &num_imm64_operand);
  return false;
}
/* Return single or double path for instructions.  */

static enum insn_path
get_insn_path (rtx insn)
{
  enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);

  if ((int) path == 0)
    return path_single;

  if ((int) path == 1)
    return path_double;

  return path_multi;
}

/* Return insn dispatch group.  */

static enum dispatch_group
get_insn_group (rtx insn)
{
  enum dispatch_group group = get_mem_group (insn);
  if (group)
    return group;

  if (is_branch (insn))
    return disp_branch;

  if (is_cmp (insn))
    return disp_cmp;

  if (has_immediate (insn))
    return disp_imm;

  if (is_prefetch (insn))
    return disp_prefetch;

  return disp_no_group;
}
/* Count number of GROUP restricted instructions in a dispatch
   window WINDOW_LIST.  */

static int
count_num_restricted (rtx insn, dispatch_windows *window_list)
{
  enum dispatch_group group = get_insn_group (insn);
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (group == disp_no_group)
    return 0;

  if (group == disp_imm)
    {
      imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
				     &num_imm64_operand);
      if (window_list->imm_size + imm_size > MAX_IMM_SIZE
	  || num_imm_operand + window_list->num_imm > MAX_IMM
	  || (num_imm32_operand > 0
	      && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
		  || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
	  || (num_imm64_operand > 0
	      && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
		  || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
	  || (window_list->imm_size + imm_size == MAX_IMM_SIZE
	      && num_imm64_operand > 0
	      && ((window_list->num_imm_64 > 0
		   && window_list->num_insn >= 2)
		  || window_list->num_insn >= 3)))
	return BIG;

      return 1;
    }

  if ((group == disp_load_store
       && (window_list->num_loads >= MAX_LOAD
	   || window_list->num_stores >= MAX_STORE))
      || ((group == disp_load
	   || group == disp_prefetch)
	  && window_list->num_loads >= MAX_LOAD)
      || (group == disp_store
	  && window_list->num_stores >= MAX_STORE))
    return BIG;

  return 1;
}
/* This function returns true if insn satisfies dispatch rules on the
   last window scheduled.  */

static bool
fits_dispatch_window (rtx insn)
{
  dispatch_windows *window_list = dispatch_window_list;
  dispatch_windows *window_list_next = dispatch_window_list->next;
  unsigned int num_restrict;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int sum;

  /* Make disp_cmp and disp_jcc get scheduled at the latest.  These
     instructions should be given the lowest priority in the
     scheduling process in Haifa scheduler to make sure they will be
     scheduled in the same dispatch window as the reference to them.  */
  if (group == disp_jcc || group == disp_cmp)
    return false;

  /* Check nonrestricted.  */
  if (group == disp_no_group || group == disp_branch)
    return true;

  /* Get last dispatch window.  */
  if (window_list_next)
    window_list = window_list_next;

  if (window_list->window_num == 1)
    {
      sum = window_list->prev->window_size + window_list->window_size;

      if (sum == 32
	  || (min_insn_size (insn) + sum) >= 48)
	/* Window 1 is full.  Go for next window.  */
	return true;
    }

  num_restrict = count_num_restricted (insn, window_list);

  if (num_restrict > num_allowable_groups[group])
    return false;

  /* See if it fits in the first window.  */
  if (window_list->window_num == 0)
    {
      /* The first window should have only single and double path
	 uops.  */
      if (path == path_double
	  && (window_list->num_uops + 2) > MAX_INSN)
	return false;
      else if (path != path_single)
	return false;
    }
  return true;
}
/* Add an instruction INSN with NUM_UOPS micro-operations to the
   dispatch window WINDOW_LIST.  */

static void
add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
{
  int byte_len = min_insn_size (insn);
  int num_insn = window_list->num_insn;
  int imm_size;
  sched_insn_info *window = window_list->window;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (!window_list->violation && group != disp_cmp
      && !fits_dispatch_window (insn))
    window_list->violation = true;

  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
				 &num_imm64_operand);

  /* Initialize window with new instruction.  */
  window[num_insn].insn = insn;
  window[num_insn].byte_len = byte_len;
  window[num_insn].group = group;
  window[num_insn].path = path;
  window[num_insn].imm_bytes = imm_size;

  window_list->window_size += byte_len;
  window_list->num_insn = num_insn + 1;
  window_list->num_uops = window_list->num_uops + num_uops;
  window_list->imm_size += imm_size;
  window_list->num_imm += num_imm_operand;
  window_list->num_imm_32 += num_imm32_operand;
  window_list->num_imm_64 += num_imm64_operand;

  if (group == disp_store)
    window_list->num_stores += 1;
  else if (group == disp_load
	   || group == disp_prefetch)
    window_list->num_loads += 1;
  else if (group == disp_load_store)
    {
      window_list->num_stores += 1;
      window_list->num_loads += 1;
    }
}
/* Adds a scheduled instruction, INSN, to the current dispatch window.
   If the total bytes of instructions or the number of instructions in
   the window exceed allowable, it allocates a new window.  */

static void
add_to_dispatch_window (rtx insn)
{
  int byte_len;
  dispatch_windows *window_list;
  dispatch_windows *next_list;
  dispatch_windows *window0_list;
  enum insn_path path;
  enum dispatch_group insn_group;
  bool insn_fits;
  int num_insn;
  int num_uops;
  int window_num;
  int insn_num_uops;
  int sum;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  window_list = dispatch_window_list;
  next_list = window_list->next;
  path = get_insn_path (insn);
  insn_group = get_insn_group (insn);

  /* Get the last dispatch window.  */
  if (next_list)
    window_list = dispatch_window_list->next;

  if (path == path_single)
    insn_num_uops = 1;
  else if (path == path_double)
    insn_num_uops = 2;
  else
    insn_num_uops = (int) path;

  /* If current window is full, get a new window.
     Window number zero is full, if MAX_INSN uops are scheduled in it.
     Window number one is full, if window zero's bytes plus window
     one's bytes is 32, or if the bytes of the new instruction added
     to the total makes it greater than 48, or it has already MAX_INSN
     instructions in it.  */
  num_insn = window_list->num_insn;
  num_uops = window_list->num_uops;
  window_num = window_list->window_num;
  insn_fits = fits_dispatch_window (insn);

  if (num_insn >= MAX_INSN
      || num_uops + insn_num_uops > MAX_INSN
      || !(insn_fits))
    {
      window_num = ~window_num & 1;
      window_list = allocate_next_window (window_num);
    }

  if (window_num == 0)
    {
      add_insn_window (insn, window_list, insn_num_uops);
      if (window_list->num_insn >= MAX_INSN
	  && insn_group == disp_branch)
	{
	  process_end_window ();
	  return;
	}
    }
  else if (window_num == 1)
    {
      window0_list = window_list->prev;
      sum = window0_list->window_size + window_list->window_size;
      if (sum == 32
	  || (byte_len + sum) >= 48)
	{
	  process_end_window ();
	  window_list = dispatch_window_list;
	}

      add_insn_window (insn, window_list, insn_num_uops);
    }
  else
    gcc_unreachable ();

  if (is_end_basic_block (insn_group))
    {
      /* End of basic block is reached; do end-basic-block process.  */
      process_end_window ();
      return;
    }
}
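/* Toy model (illustration only, not GCC code) of the two-window byte
   accounting enforced above: window 1 is treated as full once windows
   0 and 1 already hold 32 bytes, or once adding the new insn would
   reach the 48-byte limit (MAX_DISPATCH_WINDOWS * DISPATCH_WINDOW_SIZE).  */
#if 0
#include <stdbool.h>

static bool
window1_full_model (int w0_bytes, int w1_bytes, int insn_bytes)
{
  int sum = w0_bytes + w1_bytes;
  return sum == 32 || insn_bytes + sum >= 48;
}
#endif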
/* Print the dispatch window, WINDOW_NUM, to FILE.  */

DEBUG_FUNCTION static void
debug_dispatch_window_file (FILE *file, int window_num)
{
  dispatch_windows *list;
  int i;

  if (window_num == 0)
    list = dispatch_window_list;
  else
    list = dispatch_window_list1;

  fprintf (file, "Window #%d:\n", list->window_num);
  fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
	   list->num_insn, list->num_uops, list->window_size);
  fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
	   list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);

  fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
	   list->num_stores);
  fprintf (file, " insn info:\n");

  for (i = 0; i < MAX_INSN; i++)
    {
      if (!list->window[i].insn)
	break;
      fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
	       i, group_name[list->window[i].group],
	       i, (void *) list->window[i].insn,
	       i, list->window[i].path,
	       i, list->window[i].byte_len,
	       i, list->window[i].imm_bytes);
    }
}
/* Print to stdout a dispatch window.  */

DEBUG_FUNCTION void
debug_dispatch_window (int window_num)
{
  debug_dispatch_window_file (stdout, window_num);
}

/* Print INSN dispatch information to FILE.  */

DEBUG_FUNCTION static void
debug_insn_dispatch_info_file (FILE *file, rtx insn)
{
  int byte_len;
  enum insn_path path;
  enum dispatch_group group;
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  path = get_insn_path (insn);
  group = get_insn_group (insn);
  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
				 &num_imm64_operand);

  fprintf (file, " insn info:\n");
  fprintf (file, " group = %s, path = %d, byte_len = %d\n",
	   group_name[group], path, byte_len);
  fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
	   num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
}
/* Print to STDOUT the status of the ready list with respect to
   dispatch windows.  */

DEBUG_FUNCTION void
debug_ready_dispatch (void)
{
  int i;
  int no_ready = number_in_ready ();

  fprintf (stdout, "Number of ready: %d\n", no_ready);

  for (i = 0; i < no_ready; i++)
    debug_insn_dispatch_info_file (stdout, get_ready_element (i));
}
/* This routine is the driver of the dispatch scheduler.  */

static void
do_dispatch (rtx insn, int mode)
{
  if (mode == DISPATCH_INIT)
    init_dispatch_sched ();
  else if (mode == ADD_TO_DISPATCH_WINDOW)
    add_to_dispatch_window (insn);
}

/* Return TRUE if Dispatch Scheduling is supported.  */

static bool
has_dispatch (rtx insn, int action)
{
  if ((TARGET_BDVER1 || TARGET_BDVER2 || TARGET_BDVER3)
      && flag_dispatch_scheduler)
    switch (action)
      {
      default:
	return false;

      case IS_DISPATCH_ON:
	return true;

      case IS_CMP:
	return is_cmp (insn);

      case DISPATCH_VIOLATION:
	return dispatch_violation ();

      case FITS_DISPATCH_WINDOW:
	return fits_dispatch_window (insn);
      }

  return false;
}
/* Implementation of reassociation_width target hook used by
   reassoc phase to identify parallelism level in reassociated
   tree.  Statements tree_code is passed in OPC.  Arguments type
   is passed in MODE.

   Currently parallel reassociation is enabled for Atom
   processors only and we set reassociation width to be 2
   because Atom may issue up to 2 instructions per cycle.

   Return value should be fixed if parallel reassociation is
   enabled for other processors.  */

static int
ix86_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  int res = 1;

  if (INTEGRAL_MODE_P (mode) && TARGET_REASSOC_INT_TO_PARALLEL)
    res = 2;
  else if (FLOAT_MODE_P (mode) && TARGET_REASSOC_FP_TO_PARALLEL)
    res = 2;

  return res;
}
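/* Illustration (not GCC code) of what a reassociation width of 2
   enables: the reassoc pass may rebalance one serial dependence chain
   into two independent chains for a 2-issue core like Atom.  */
#if 0
static int
sum4_model (int a, int b, int c, int d)
{
  /* Width 1 keeps ((a + b) + c) + d, a chain of length 3.
     Width 2 allows (a + b) + (c + d): the two inner adds can issue
     in the same cycle, and one final add combines them.  */
  return (a + b) + (c + d);
}
#endif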
/* ??? No autovectorization into MMX or 3DNOW until we can reliably
   place emms and femms instructions.  */

static enum machine_mode
ix86_preferred_simd_mode (enum machine_mode mode)
{
  if (!TARGET_SSE)
    return word_mode;

  switch (mode)
    {
    case QImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V32QImode : V16QImode;
    case HImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V16HImode : V8HImode;
    case SImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V8SImode : V4SImode;
    case DImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V4DImode : V2DImode;

    case SFmode:
      if (TARGET_AVX && !TARGET_PREFER_AVX128)
	return V8SFmode;
      else
	return V4SFmode;

    case DFmode:
      if (!TARGET_VECTORIZE_DOUBLE)
	return word_mode;
      else if (TARGET_AVX && !TARGET_PREFER_AVX128)
	return V4DFmode;
      else if (TARGET_SSE2)
	return V2DFmode;
      /* FALLTHRU */

    default:
      return word_mode;
    }
}

/* If AVX is enabled then try vectorizing with both 256bit and 128bit
   vectors.  */

static unsigned int
ix86_autovectorize_vector_sizes (void)
{
  return (TARGET_AVX && !TARGET_PREFER_AVX128) ? 32 | 16 : 0;
}
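/* The return value above is a bit mask of vector byte sizes for the
   vectorizer to try; sketch (illustration only, not GCC code) of how
   such a mask decodes.  With AVX and no -mprefer-avx128 it is
   32 | 16 == 48: try 32-byte vectors first, then 16-byte ones.  */
#if 0
static void
decode_sizes_model (unsigned int mask)
{
  unsigned int size;

  for (size = 32; size >= 16; size /= 2)
    if (mask & size)
      {
	/* Attempt vectorization with SIZE-byte vectors here.  */
      }
}
#endif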
/* Return class of registers which could be used for pseudo of MODE
   and of class RCLASS for spilling instead of memory.  Return NO_REGS
   if it is not possible or non-profitable.  */

static reg_class_t
ix86_spill_class (reg_class_t rclass, enum machine_mode mode)
{
  if (TARGET_SSE && TARGET_GENERAL_REGS_SSE_SPILL && ! TARGET_MMX
      && (mode == SImode || (TARGET_64BIT && mode == DImode))
      && INTEGER_CLASS_P (rclass))
    return ALL_SSE_REGS;
  return NO_REGS;
}
/* Implement targetm.vectorize.init_cost.  */

static void *
ix86_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
{
  unsigned *cost = XNEWVEC (unsigned, 3);
  cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
  return cost;
}

/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
ix86_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
		    struct _stmt_vec_info *stmt_info, int misalign,
		    enum vect_cost_model_location where)
{
  unsigned *cost = (unsigned *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = ix86_builtin_vectorization_cost (kind, vectype, misalign);

      /* Statements in an inner loop relative to the loop being
	 vectorized are weighted more heavily.  The value here is
	 arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
	count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost[where] += retval;
    }

  return retval;
}

/* Implement targetm.vectorize.finish_cost.  */

static void
ix86_finish_cost (void *data, unsigned *prologue_cost,
		  unsigned *body_cost, unsigned *epilogue_cost)
{
  unsigned *cost = (unsigned *) data;

  *prologue_cost = cost[vect_prologue];
  *body_cost = cost[vect_body];
  *epilogue_cost = cost[vect_epilogue];
}

/* Implement targetm.vectorize.destroy_cost_data.  */

static void
ix86_destroy_cost_data (void *data)
{
  free (data);
}
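/* Sketch (illustration only, not GCC code) of the lifecycle the
   vectorizer drives through the four hooks above: init once, add each
   statement's cost, read the totals back, then free.  vector_stmt is
   one of the enum vect_cost_for_stmt kinds.  */
#if 0
static unsigned
cost_session_model (void)
{
  void *data = ix86_init_cost (NULL);
  unsigned prologue_cost, body_cost, epilogue_cost;

  /* One vector statement in the loop body; no stmt_info available.  */
  ix86_add_stmt_cost (data, 1, vector_stmt, NULL, 0, vect_body);

  ix86_finish_cost (data, &prologue_cost, &body_cost, &epilogue_cost);
  ix86_destroy_cost_data (data);
  return body_cost;
}
#endif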
/* Validate target specific memory model bits in VAL.  */

static unsigned HOST_WIDE_INT
ix86_memmodel_check (unsigned HOST_WIDE_INT val)
{
  unsigned HOST_WIDE_INT model = val & MEMMODEL_MASK;
  bool strong;

  if (val & ~(unsigned HOST_WIDE_INT) (IX86_HLE_ACQUIRE | IX86_HLE_RELEASE
				       | MEMMODEL_MASK)
      || ((val & IX86_HLE_ACQUIRE) && (val & IX86_HLE_RELEASE)))
    {
      warning (OPT_Winvalid_memory_model,
	       "Unknown architecture specific memory model");
      return MEMMODEL_SEQ_CST;
    }
  strong = (model == MEMMODEL_ACQ_REL || model == MEMMODEL_SEQ_CST);
  if (val & IX86_HLE_ACQUIRE && !(model == MEMMODEL_ACQUIRE || strong))
    {
      warning (OPT_Winvalid_memory_model,
	       "HLE_ACQUIRE not used with ACQUIRE or stronger memory model");
      return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
    }
  if (val & IX86_HLE_RELEASE && !(model == MEMMODEL_RELEASE || strong))
    {
      warning (OPT_Winvalid_memory_model,
	       "HLE_RELEASE not used with RELEASE or stronger memory model");
      return MEMMODEL_SEQ_CST | IX86_HLE_RELEASE;
    }
  return val;
}
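/* Example (user code, illustration only) of the HLE bits this hook
   validates: GCC exposes them as __ATOMIC_HLE_ACQUIRE and
   __ATOMIC_HLE_RELEASE, to be OR-ed into an __atomic memory model of
   at least matching strength.  */
#if 0
static int lock;

static void
hle_lock_region_model (void)
{
  /* Start an elided transaction (xacquire lock xchg).  */
  while (__atomic_exchange_n (&lock, 1,
			      __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE))
    ;
  /* ... critical section ... */
  /* End the elision (xrelease mov).  RELEASE pairs with HLE_RELEASE,
     exactly the constraint ix86_memmodel_check enforces.  */
  __atomic_store_n (&lock, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
}
#endif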
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#  undef TARGET_MERGE_DECL_ATTRIBUTES
#  define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_TM_LOAD
#define TARGET_VECTORIZE_BUILTIN_TM_LOAD ix86_builtin_tm_load

#undef TARGET_VECTORIZE_BUILTIN_TM_STORE
#define TARGET_VECTORIZE_BUILTIN_TM_STORE ix86_builtin_tm_store

#undef TARGET_VECTORIZE_BUILTIN_GATHER
#define TARGET_VECTORIZE_BUILTIN_GATHER ix86_vectorize_builtin_gather

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_PROFILE_BEFORE_PROLOGUE
#define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue

#undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
#define TARGET_MANGLE_DECL_ASSEMBLER_NAME ix86_mangle_decl_assembler_name

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND ix86_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra
#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#undef TARGET_MEMMODEL_CHECK
#define TARGET_MEMMODEL_CHECK ix86_memmodel_check

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ix86_option_override

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN ix86_fold_builtin

#undef TARGET_COMPARE_VERSION_PRIORITY
#define TARGET_COMPARE_VERSION_PRIORITY ix86_compare_version_priority

#undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
#define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
  ix86_generate_version_dispatcher_body

#undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
#define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
  ix86_get_function_versions_dispatcher

#undef TARGET_ENUM_VA_LIST_P
#define TARGET_ENUM_VA_LIST_P ix86_enum_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ix86_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS ix86_return_pops_args

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN ix86_legitimate_combined_insn

#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET ix86_asan_shadow_offset

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
#endif

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE ix86_promote_function_mode

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK ix86_member_type_forces_blk

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS ix86_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS ix86_class_max_nregs

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
  ix86_vectorize_vec_perm_const_ok
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  ix86_preferred_simd_mode
#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  ix86_autovectorize_vector_sizes
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST ix86_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST ix86_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST ix86_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA ix86_destroy_cost_data

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_OPTION_FUNCTION_VERSIONS
#define TARGET_OPTION_FUNCTION_VERSIONS ix86_function_versions

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_true

#undef TARGET_REGISTER_PRIORITY
#define TARGET_REGISTER_PRIORITY ix86_register_priority

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P ix86_legitimate_constant_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage

#if TARGET_MACHO
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS darwin_rename_builtins
#endif

#undef TARGET_SPILL_CLASS
#define TARGET_SPILL_CLASS ix86_spill_class

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"