/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "basic-block.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "sched-int.h"
#include "diagnostic.h"
#include "tree-pass.h"
#include "tree-flow.h"
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
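/* Illustrative note (not in the original source, field names are an
   assumption to be checked against i386.h): MODE_INDEX selects the
   per-mode slot of the 5-element cost arrays in the tables below, with
   index 4 ("other") catching every remaining mode.  A multiply cost
   lookup then reads roughly as

     cost->mult_init[MODE_INDEX (mode)] + nbits * cost->mult_bit

   where mult_init and mult_bit correspond to the "cost of starting
   multiply" and "cost of multiply per each bit set" entries.  */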
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
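/* Worked example of the unit trick above: COSTS_N_INSNS (1) expands to
   1*4 = 4 and COSTS_N_BYTES (2) to 2*2 = 4, so a 2-byte add scores the
   same as a one-instruction speed cost.  Byte counts (for -Os tuning)
   and instruction counts can therefore share one cost-table format.  */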
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall, false}}}
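/* A minimal sketch (illustrative only, not part of this file) of how the
   stringop tables below are read: each {max, alg, noalign} triple covers
   block sizes up to MAX bytes, a MAX of -1 terminates the list and covers
   all larger sizes, and the leading algorithm of each pair handles blocks
   of unknown size.  The hypothetical helper below mirrors that scan using
   local demo types, so it assumes nothing about GCC's internal structs.  */
#if 0	/* illustration only */
enum demo_alg { demo_libcall, demo_loop, demo_rep_prefix };
struct demo_entry { int max; enum demo_alg alg; int noalign; };

static enum demo_alg
demo_pick_alg (const struct demo_entry *e, long size)
{
  /* Entries are ordered by MAX; the -1 sentinel matches any size.  */
  while (e->max != -1 && size > e->max)
    e++;
  return e->alg;
}
#endif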
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  0,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
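/* Note (an inference, not original commentary): every stringop entry in
   the size table above picks rep_prefix_1_byte, i.e. plain "rep movsb" /
   "rep stosb", which has the shortest possible encoding regardless of
   block size.  That is consistent with this table being the -Os tuning.  */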
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/* HI */
   COSTS_N_INSNS (6),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (23),			/* SI */
   COSTS_N_INSNS (23),			/* DI */
   COSTS_N_INSNS (23)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/* HI */
   COSTS_N_INSNS (12),			/* SI */
   COSTS_N_INSNS (12),			/* DI */
   COSTS_N_INSNS (12)},			/* other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/* HI */
   COSTS_N_INSNS (40),			/* SI */
   COSTS_N_INSNS (40),			/* DI */
   COSTS_N_INSNS (40)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/* HI */
   COSTS_N_INSNS (11),			/* SI */
   COSTS_N_INSNS (11),			/* DI */
   COSTS_N_INSNS (11)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/* HI */
   COSTS_N_INSNS (25),			/* SI */
   COSTS_N_INSNS (25),			/* DI */
   COSTS_N_INSNS (25)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (4)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/* HI */
   COSTS_N_INSNS (17),			/* SI */
   COSTS_N_INSNS (17),			/* DI */
   COSTS_N_INSNS (17)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  A worked reading
     of this table follows its definition.  */
  {{rep_prefix_4_byte, {{128, loop, false}, {1024, unrolled_loop, false},
			{8192, rep_prefix_4_byte, false},
			{-1, rep_prefix_1_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop, false},
			{8192, rep_prefix_4_byte, false},
			{-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
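/* Worked reading of the 32-bit memcpy table above (illustrative only): a
   64-byte copy falls under the {128, loop} bucket and uses an inline loop;
   512 bytes selects unrolled_loop; 4096 bytes selects rep_prefix_4_byte
   (rep movsl); anything beyond 8192 bytes hits the {-1, ...} entry and uses
   rep_prefix_1_byte (rep movsb), matching the comment that the movsb
   startup cost washes out after about 4K.  */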
static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (7),			/* SI */
   COSTS_N_INSNS (7),			/* DI */
   COSTS_N_INSNS (7)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (39),			/* SI */
   COSTS_N_INSNS (39),			/* DI */
   COSTS_N_INSNS (39)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (3),			/* DI */
   COSTS_N_INSNS (3)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/* HI */
   COSTS_N_INSNS (18),			/* SI */
   COSTS_N_INSNS (18),			/* DI */
   COSTS_N_INSNS (18)},			/* other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/* HI */
   COSTS_N_INSNS (5),			/* SI */
   COSTS_N_INSNS (5),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) compared to K8.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  (See the worked note after this
     table.)  */
  {{libcall, {{2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
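/* Worked reading (illustrative only): with the single
   {2048, rep_prefix_4_byte} bucket above, Athlon copies and clears blocks
   of up to 2048 bytes with a rep-prefixed instruction and hands everything
   larger, as well as blocks of unknown size, to the library call.  */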
static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has an optimized REP instruction for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, the
     libcall can use nontemporal accesses and beat the inline version
     considerably.  (See the worked note after this table.)  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
	      {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
	      {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false},
	      {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};
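/* Worked reading (illustrative only; the first initializer of each pair is
   used for 32-bit code and the second for 64-bit, which is why 32-bit-only
   chips above fill the second slot with DUMMY_STRINGOP_ALGS): for a 64-bit
   memcpy on K8, blocks of up to 16 bytes use an inline loop, up to 8192
   bytes "rep movsq", and larger or unknown sizes the libcall, exactly the
   small/medium/large split the comment describes.  */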
static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
					    MOVD reg32, xmmreg Double FADD 3 */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     the libcall can use nontemporal accesses and beat the inline version
     considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
	      {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
	      {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
					    MOVD reg32, xmmreg Double FADD 3 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER1 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     the libcall can use nontemporal accesses and beat the inline version
     considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
	      {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
	      {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
					    MOVD reg32, xmmreg Double FADD 3 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER2 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     the libcall can use nontemporal accesses and beat the inline version
     considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
	      {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
	      {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver3_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER3 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     the libcall can use nontemporal accesses and beat the inline version
     considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
	      {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
	      {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs btver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
					    MOVD reg32, xmmreg Double FADD 3 */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* BTVER1 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     the libcall can use nontemporal accesses and beat the inline version
     considerably.  */
  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
	      {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
	      {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs btver2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
					    MOVD reg32, xmmreg Double FADD 3 */
  32,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  {{libcall, {{6, loop, false}, {14, unrolled_loop, false},
	      {-1, rep_prefix_4_byte, false}}},
   {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  {{libcall, {{8, loop, false}, {24, unrolled_loop, false},
	      {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false},
	      {-1, libcall, false}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/* HI */
   COSTS_N_INSNS (15),			/* SI */
   COSTS_N_INSNS (15),			/* DI */
   COSTS_N_INSNS (15)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/* HI */
   COSTS_N_INSNS (56),			/* SI */
   COSTS_N_INSNS (56),			/* DI */
   COSTS_N_INSNS (56)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte, false}, {-1, rep_prefix_4_byte, false}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte, false}, {48, loop, false},
	      {20480, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/* HI */
   COSTS_N_INSNS (10),			/* SI */
   COSTS_N_INSNS (10),			/* DI */
   COSTS_N_INSNS (10)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/* HI */
   COSTS_N_INSNS (66),			/* SI */
   COSTS_N_INSNS (66),			/* DI */
   COSTS_N_INSNS (66)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte, false}, {-1, rep_prefix_4_byte, false}}},
   {libcall, {{32, loop, false}, {20000, rep_prefix_8_byte, false},
	      {100000, unrolled_loop, false}, {-1, libcall, false}}}},
  {{libcall, {{6, loop_1_byte, false}, {48, loop, false},
	      {20480, rep_prefix_4_byte, false}, {-1, libcall, false}}},
   {libcall, {{24, loop, false}, {64, unrolled_loop, false},
	      {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
1413 struct processor_costs atom_cost
= {
1414 COSTS_N_INSNS (1), /* cost of an add instruction */
1415 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1416 COSTS_N_INSNS (1), /* variable shift costs */
1417 COSTS_N_INSNS (1), /* constant shift costs */
1418 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1419 COSTS_N_INSNS (4), /* HI */
1420 COSTS_N_INSNS (3), /* SI */
1421 COSTS_N_INSNS (4), /* DI */
1422 COSTS_N_INSNS (2)}, /* other */
1423 0, /* cost of multiply per each bit set */
1424 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1425 COSTS_N_INSNS (26), /* HI */
1426 COSTS_N_INSNS (42), /* SI */
1427 COSTS_N_INSNS (74), /* DI */
1428 COSTS_N_INSNS (74)}, /* other */
1429 COSTS_N_INSNS (1), /* cost of movsx */
1430 COSTS_N_INSNS (1), /* cost of movzx */
1431 8, /* "large" insn */
1432 17, /* MOVE_RATIO */
1433 4, /* cost for loading QImode using movzbl */
1434 {4, 4, 4}, /* cost of loading integer registers
1435 in QImode, HImode and SImode.
1436 Relative to reg-reg move (2). */
1437 {4, 4, 4}, /* cost of storing integer registers */
1438 4, /* cost of reg,reg fld/fst */
1439 {12, 12, 12}, /* cost of loading fp registers
1440 in SFmode, DFmode and XFmode */
1441 {6, 6, 8}, /* cost of storing fp registers
1442 in SFmode, DFmode and XFmode */
1443 2, /* cost of moving MMX register */
1444 {8, 8}, /* cost of loading MMX registers
1445 in SImode and DImode */
1446 {8, 8}, /* cost of storing MMX registers
1447 in SImode and DImode */
1448 2, /* cost of moving SSE register */
1449 {8, 8, 8}, /* cost of loading SSE registers
1450 in SImode, DImode and TImode */
1451 {8, 8, 8}, /* cost of storing SSE registers
1452 in SImode, DImode and TImode */
1453 5, /* MMX or SSE register to integer */
1454 32, /* size of l1 cache. */
1455 256, /* size of l2 cache. */
1456 64, /* size of prefetch block */
1457 6, /* number of parallel prefetches */
1458 3, /* Branch cost */
1459 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1460 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1461 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1462 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1463 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1464 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1465 {{libcall, {{11, loop, false}, {-1, rep_prefix_4_byte, false}}},
1466 {libcall, {{32, loop, false}, {64, rep_prefix_4_byte, false},
1467 {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
1468 {{libcall, {{8, loop, false}, {15, unrolled_loop, false},
1469 {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
1470 {libcall, {{24, loop, false}, {32, unrolled_loop, false},
1471 {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
1472 1, /* scalar_stmt_cost. */
1473 1, /* scalar load_cost. */
1474 1, /* scalar_store_cost. */
1475 1, /* vec_stmt_cost. */
1476 1, /* vec_to_scalar_cost. */
1477 1, /* scalar_to_vec_cost. */
1478 1, /* vec_align_load_cost. */
1479 2, /* vec_unalign_load_cost. */
1480 1, /* vec_store_cost. */
1481 3, /* cond_taken_branch_cost. */
1482 1, /* cond_not_taken_branch_cost. */
1485 /* Generic64 should produce code tuned for Nocona and K8. */
1487 struct processor_costs generic64_cost = {
1488 COSTS_N_INSNS (1), /* cost of an add instruction */
1489 /* On all chips taken into consideration lea is 2 cycles and more. With
1490 this cost however our current implementation of synth_mult results in
1491 use of unnecessary temporary registers causing regression on several
1492 SPECfp benchmarks. */
1493 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1494 COSTS_N_INSNS (1), /* variable shift costs */
1495 COSTS_N_INSNS (1), /* constant shift costs */
1496 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1497 COSTS_N_INSNS (4), /* HI */
1498 COSTS_N_INSNS (3), /* SI */
1499 COSTS_N_INSNS (4), /* DI */
1500 COSTS_N_INSNS (2)}, /* other */
1501 0, /* cost of multiply per each bit set */
1502 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1503 COSTS_N_INSNS (26), /* HI */
1504 COSTS_N_INSNS (42), /* SI */
1505 COSTS_N_INSNS (74), /* DI */
1506 COSTS_N_INSNS (74)}, /* other */
1507 COSTS_N_INSNS (1), /* cost of movsx */
1508 COSTS_N_INSNS (1), /* cost of movzx */
1509 8, /* "large" insn */
1510 17, /* MOVE_RATIO */
1511 4, /* cost for loading QImode using movzbl */
1512 {4, 4, 4}, /* cost of loading integer registers
1513 in QImode, HImode and SImode.
1514 Relative to reg-reg move (2). */
1515 {4, 4, 4}, /* cost of storing integer registers */
1516 4, /* cost of reg,reg fld/fst */
1517 {12, 12, 12}, /* cost of loading fp registers
1518 in SFmode, DFmode and XFmode */
1519 {6, 6, 8}, /* cost of storing fp registers
1520 in SFmode, DFmode and XFmode */
1521 2, /* cost of moving MMX register */
1522 {8, 8}, /* cost of loading MMX registers
1523 in SImode and DImode */
1524 {8, 8}, /* cost of storing MMX registers
1525 in SImode and DImode */
1526 2, /* cost of moving SSE register */
1527 {8, 8, 8}, /* cost of loading SSE registers
1528 in SImode, DImode and TImode */
1529 {8, 8, 8}, /* cost of storing SSE registers
1530 in SImode, DImode and TImode */
1531 5, /* MMX or SSE register to integer */
1532 32, /* size of l1 cache. */
1533 512, /* size of l2 cache. */
1534 64, /* size of prefetch block */
1535 6, /* number of parallel prefetches */
1536 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1537 value is increased to the perhaps more appropriate value of 5. */
1538 3, /* Branch cost */
1539 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1540 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1541 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1542 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1543 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1544 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1545 {DUMMY_STRINGOP_ALGS,
1546 {libcall, {{32, loop, false}, {8192, rep_prefix_8_byte, false},
1547 {-1, libcall, false}}}},
1548 {DUMMY_STRINGOP_ALGS,
1549 {libcall, {{32, loop, false}, {8192, rep_prefix_8_byte, false},
1550 {-1, libcall, false}}}},
1551 1, /* scalar_stmt_cost. */
1552 1, /* scalar load_cost. */
1553 1, /* scalar_store_cost. */
1554 1, /* vec_stmt_cost. */
1555 1, /* vec_to_scalar_cost. */
1556 1, /* scalar_to_vec_cost. */
1557 1, /* vec_align_load_cost. */
1558 2, /* vec_unalign_load_cost. */
1559 1, /* vec_store_cost. */
1560 3, /* cond_taken_branch_cost. */
1561 1, /* cond_not_taken_branch_cost. */
1564 /* core_cost should produce code tuned for the Core family of CPUs. */
1566 struct processor_costs core_cost = {
1567 COSTS_N_INSNS (1), /* cost of an add instruction */
1568 /* On all chips taken into consideration lea is 2 cycles and more. With
1569 this cost however our current implementation of synth_mult results in
1570 use of unnecessary temporary registers causing regression on several
1571 SPECfp benchmarks. */
1572 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1573 COSTS_N_INSNS (1), /* variable shift costs */
1574 COSTS_N_INSNS (1), /* constant shift costs */
1575 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1576 COSTS_N_INSNS (4), /* HI */
1577 COSTS_N_INSNS (3), /* SI */
1578 COSTS_N_INSNS (4), /* DI */
1579 COSTS_N_INSNS (2)}, /* other */
1580 0, /* cost of multiply per each bit set */
1581 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1582 COSTS_N_INSNS (26), /* HI */
1583 COSTS_N_INSNS (42), /* SI */
1584 COSTS_N_INSNS (74), /* DI */
1585 COSTS_N_INSNS (74)}, /* other */
1586 COSTS_N_INSNS (1), /* cost of movsx */
1587 COSTS_N_INSNS (1), /* cost of movzx */
1588 8, /* "large" insn */
1589 17, /* MOVE_RATIO */
1590 4, /* cost for loading QImode using movzbl */
1591 {4, 4, 4}, /* cost of loading integer registers
1592 in QImode, HImode and SImode.
1593 Relative to reg-reg move (2). */
1594 {4, 4, 4}, /* cost of storing integer registers */
1595 4, /* cost of reg,reg fld/fst */
1596 {12, 12, 12}, /* cost of loading fp registers
1597 in SFmode, DFmode and XFmode */
1598 {6, 6, 8}, /* cost of storing fp registers
1599 in SFmode, DFmode and XFmode */
1600 2, /* cost of moving MMX register */
1601 {8, 8}, /* cost of loading MMX registers
1602 in SImode and DImode */
1603 {8, 8}, /* cost of storing MMX registers
1604 in SImode and DImode */
1605 2, /* cost of moving SSE register */
1606 {8, 8, 8}, /* cost of loading SSE registers
1607 in SImode, DImode and TImode */
1608 {8, 8, 8}, /* cost of storing SSE registers
1609 in SImode, DImode and TImode */
1610 5, /* MMX or SSE register to integer */
1611 64, /* size of l1 cache. */
1612 512, /* size of l2 cache. */
1613 64, /* size of prefetch block */
1614 6, /* number of parallel prefetches */
1615 /* FIXME perhaps more appropriate value is 5. */
1616 3, /* Branch cost */
1617 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1618 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1619 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1620 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1621 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1622 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1623 {{libcall, {{1024, rep_prefix_4_byte, true}, {-1, libcall, false}}},
1624 {libcall, {{24, loop, true}, {128, rep_prefix_8_byte, true},
1625 {-1, libcall, false}}}},
1626 {{libcall, {{6, loop_1_byte, true},
1628 {8192, rep_prefix_4_byte, true},
1629 {-1, libcall, false}}},
1630 {libcall, {{24, loop, true}, {512, rep_prefix_8_byte, true},
1631 {-1, libcall, false}}}},
1632 1, /* scalar_stmt_cost. */
1633 1, /* scalar load_cost. */
1634 1, /* scalar_store_cost. */
1635 1, /* vec_stmt_cost. */
1636 1, /* vec_to_scalar_cost. */
1637 1, /* scalar_to_vec_cost. */
1638 1, /* vec_align_load_cost. */
1639 2, /* vec_unalign_load_cost. */
1640 1, /* vec_store_cost. */
1641 3, /* cond_taken_branch_cost. */
1642 1, /* cond_not_taken_branch_cost. */
1645 /* Generic32 should produce code tuned for PPro, Pentium4, Nocona, Core 2 and K8. */
1648 struct processor_costs generic32_cost = {
1649 COSTS_N_INSNS (1), /* cost of an add instruction */
1650 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1651 COSTS_N_INSNS (1), /* variable shift costs */
1652 COSTS_N_INSNS (1), /* constant shift costs */
1653 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1654 COSTS_N_INSNS (4), /* HI */
1655 COSTS_N_INSNS (3), /* SI */
1656 COSTS_N_INSNS (4), /* DI */
1657 COSTS_N_INSNS (2)}, /* other */
1658 0, /* cost of multiply per each bit set */
1659 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1660 COSTS_N_INSNS (26), /* HI */
1661 COSTS_N_INSNS (42), /* SI */
1662 COSTS_N_INSNS (74), /* DI */
1663 COSTS_N_INSNS (74)}, /* other */
1664 COSTS_N_INSNS (1), /* cost of movsx */
1665 COSTS_N_INSNS (1), /* cost of movzx */
1666 8, /* "large" insn */
1667 17, /* MOVE_RATIO */
1668 4, /* cost for loading QImode using movzbl */
1669 {4, 4, 4}, /* cost of loading integer registers
1670 in QImode, HImode and SImode.
1671 Relative to reg-reg move (2). */
1672 {4, 4, 4}, /* cost of storing integer registers */
1673 4, /* cost of reg,reg fld/fst */
1674 {12, 12, 12}, /* cost of loading fp registers
1675 in SFmode, DFmode and XFmode */
1676 {6, 6, 8}, /* cost of storing fp registers
1677 in SFmode, DFmode and XFmode */
1678 2, /* cost of moving MMX register */
1679 {8, 8}, /* cost of loading MMX registers
1680 in SImode and DImode */
1681 {8, 8}, /* cost of storing MMX registers
1682 in SImode and DImode */
1683 2, /* cost of moving SSE register */
1684 {8, 8, 8}, /* cost of loading SSE registers
1685 in SImode, DImode and TImode */
1686 {8, 8, 8}, /* cost of storing SSE registers
1687 in SImode, DImode and TImode */
1688 5, /* MMX or SSE register to integer */
1689 32, /* size of l1 cache. */
1690 256, /* size of l2 cache. */
1691 64, /* size of prefetch block */
1692 6, /* number of parallel prefetches */
1693 3, /* Branch cost */
1694 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1695 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1696 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1697 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1698 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1699 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1700 {{libcall, {{32, loop, false}, {8192, rep_prefix_4_byte, false},
1701 {-1, libcall, false}}},
1702 DUMMY_STRINGOP_ALGS},
1703 {{libcall, {{32, loop, false}, {8192, rep_prefix_4_byte, false},
1704 {-1, libcall, false}}},
1705 DUMMY_STRINGOP_ALGS},
1706 1, /* scalar_stmt_cost. */
1707 1, /* scalar load_cost. */
1708 1, /* scalar_store_cost. */
1709 1, /* vec_stmt_cost. */
1710 1, /* vec_to_scalar_cost. */
1711 1, /* scalar_to_vec_cost. */
1712 1, /* vec_align_load_cost. */
1713 2, /* vec_unalign_load_cost. */
1714 1, /* vec_store_cost. */
1715 3, /* cond_taken_branch_cost. */
1716 1, /* cond_not_taken_branch_cost. */
1719 /* Set by -mtune. */
1720 const struct processor_costs *ix86_tune_cost = &pentium_cost;
1722 /* Set by -mtune or -Os. */
1723 const struct processor_costs *ix86_cost = &pentium_cost;
1725 /* Processor feature/optimization bitmasks. */
1726 #define m_386 (1<<PROCESSOR_I386)
1727 #define m_486 (1<<PROCESSOR_I486)
1728 #define m_PENT (1<<PROCESSOR_PENTIUM)
1729 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1730 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1731 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1732 #define m_P4_NOCONA (m_PENT4 | m_NOCONA)
1733 #define m_CORE2 (1<<PROCESSOR_CORE2)
1734 #define m_COREI7 (1<<PROCESSOR_COREI7)
1735 #define m_CORE2I7 (m_CORE2 | m_COREI7)
1736 #define m_ATOM (1<<PROCESSOR_ATOM)
1738 #define m_GEODE (1<<PROCESSOR_GEODE)
1739 #define m_K6 (1<<PROCESSOR_K6)
1740 #define m_K6_GEODE (m_K6 | m_GEODE)
1741 #define m_K8 (1<<PROCESSOR_K8)
1742 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1743 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1744 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1745 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1746 #define m_BDVER2 (1<<PROCESSOR_BDVER2)
1747 #define m_BDVER3 (1<<PROCESSOR_BDVER3)
1748 #define m_BDVER (m_BDVER1 | m_BDVER2 | m_BDVER3)
1749 #define m_BTVER (m_BTVER1 | m_BTVER2)
1750 #define m_BTVER1 (1<<PROCESSOR_BTVER1)
1751 #define m_BTVER2 (1<<PROCESSOR_BTVER2)
1752 #define m_AMD_MULTIPLE (m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER)
1754 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1755 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1757 /* Generic instruction choice should be common subset of supported CPUs
1758 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1759 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1761 /* Feature tests against the various tunings. */
1762 unsigned char ix86_tune_features[X86_TUNE_LAST];
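/* Editor's note: an illustrative sketch, not part of the original file.
   ix86_tune_features is derived from initial_ix86_tune_features below by
   reducing each per-processor bitmask to a boolean for the selected
   tuning, essentially:

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     unsigned int i;
     for (i = 0; i < X86_TUNE_LAST; i++)
       ix86_tune_features[i]
	 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   so with -mtune=atom, for example, any feature whose mask includes
   m_ATOM tests true.  The loop above mirrors what the option override
   code in this file does.  */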
1764 /* Feature tests against the various tunings used to create ix86_tune_features
1765 based on the processor mask. */
1766 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1767 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1768 negatively, so enabling for Generic64 seems like good code size
1769 tradeoff. We can't enable it for 32bit generic because it does not
1770 work well with PPro base chips. */
1771 m_386 | m_CORE2I7 | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC64,
1773 /* X86_TUNE_PUSH_MEMORY */
1774 m_386 | m_P4_NOCONA | m_CORE2I7 | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1776 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1779 /* X86_TUNE_UNROLL_STRLEN */
1780 m_486 | m_PENT | m_PPRO | m_ATOM | m_CORE2I7 | m_K6 | m_AMD_MULTIPLE | m_GENERIC,
1782 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1783 on simulation result. But after P4 was made, no performance benefit
1784 was observed with branch hints. It also increases the code size.
1785 As a result, icc never generates branch hints. */
1788 /* X86_TUNE_DOUBLE_WITH_ADD */
1791 /* X86_TUNE_USE_SAHF */
1792 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC,
1794 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1795 partial dependencies. */
1796 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1798 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1799 register stalls on Generic32 compilation setting as well. However
1800 in current implementation the partial register stalls are not eliminated
1801 very well - they can be introduced via subregs synthesized by combine
1802 and can happen in caller/callee saving sequences. Because this option
1803 pays back little on PPro based chips and is in conflict with partial reg
1804 dependencies used by Athlon/P4 based chips, it is better to leave it off
1805 for generic32 for now. */
1808 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1809 m_CORE2I7 | m_GENERIC,
1811 /* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall
1812 on 16-bit immediate moves into memory on Core2 and Corei7. */
1813 m_CORE2I7 | m_GENERIC,
1815 /* X86_TUNE_USE_HIMODE_FIOP */
1816 m_386 | m_486 | m_K6_GEODE,
1818 /* X86_TUNE_USE_SIMODE_FIOP */
1819 ~(m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC),
1821 /* X86_TUNE_USE_MOV0 */
1824 /* X86_TUNE_USE_CLTD */
1825 ~(m_PENT | m_ATOM | m_K6),
1827 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1830 /* X86_TUNE_SPLIT_LONG_MOVES */
1833 /* X86_TUNE_READ_MODIFY_WRITE */
1836 /* X86_TUNE_READ_MODIFY */
1839 /* X86_TUNE_PROMOTE_QIMODE */
1840 m_386 | m_486 | m_PENT | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1842 /* X86_TUNE_FAST_PREFIX */
1843 ~(m_386 | m_486 | m_PENT),
1845 /* X86_TUNE_SINGLE_STRINGOP */
1846 m_386 | m_P4_NOCONA,
1848 /* X86_TUNE_QIMODE_MATH */
1851 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1852 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1853 might be considered for Generic32 if our scheme for avoiding partial
1854 stalls was more effective. */
1857 /* X86_TUNE_PROMOTE_QI_REGS */
1860 /* X86_TUNE_PROMOTE_HI_REGS */
1863 /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
1864 over esp addition. */
1865 m_386 | m_486 | m_PENT | m_PPRO,
1867 /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
1868 over esp addition. */
1871 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
1872 over esp subtraction. */
1873 m_386 | m_486 | m_PENT | m_K6_GEODE,
1875 /* X86_TUNE_DOUBLE_PUSH. Enable if double push insn is preferred
1876 over esp subtraction. */
1877 m_PENT | m_K6_GEODE,
1879 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1880 for DFmode copies */
1881 ~(m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC),
1883 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1884 m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
1886 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1887 conflict here in between PPro/Pentium4 based chips that treat 128bit
1888 SSE registers as single units versus K8 based chips that divide SSE
1889 registers into two 64bit halves. This knob promotes all store destinations
1890 to be 128bit to allow register renaming on 128bit SSE units, but usually
1891 results in one extra microop on 64bit SSE units. Experimental results
1892 show that disabling this option on P4 brings over 20% SPECfp regression,
1893 while enabling it on K8 brings roughly 2.4% regression that can be partly
1894 masked by careful scheduling of moves. */
1895 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMDFAM10 | m_BDVER | m_GENERIC,
1897 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1898 m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER,
1900 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1903 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1906 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1907 are resolved on SSE register parts instead of whole registers, so we may
1908 maintain just lower part of scalar values in proper format leaving the
1909 upper part undefined. */
1912 /* X86_TUNE_SSE_TYPELESS_STORES */
1915 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1916 m_PPRO | m_P4_NOCONA,
1918 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1919 m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
1921 /* X86_TUNE_PROLOGUE_USING_MOVE */
1922 m_PPRO | m_ATHLON_K8,
1924 /* X86_TUNE_EPILOGUE_USING_MOVE */
1925 m_PPRO | m_ATHLON_K8,
1927 /* X86_TUNE_SHIFT1 */
1930 /* X86_TUNE_USE_FFREEP */
1933 /* X86_TUNE_INTER_UNIT_MOVES */
1934 ~(m_AMD_MULTIPLE | m_GENERIC),
1936 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1937 ~(m_AMDFAM10 | m_BDVER),
1939 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1940 than 4 branch instructions in the 16 byte window. */
1941 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
1943 /* X86_TUNE_SCHEDULE */
1944 m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1946 /* X86_TUNE_USE_BT */
1947 m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
1949 /* X86_TUNE_USE_INCDEC */
1950 ~(m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GENERIC),
1952 /* X86_TUNE_PAD_RETURNS */
1953 m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC,
1955 /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions. */
1958 /* X86_TUNE_EXT_80387_CONSTANTS */
1959 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC,
1961 /* X86_TUNE_AVOID_VECTOR_DECODE */
1962 m_CORE2I7 | m_K8 | m_GENERIC64,
1964 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1965 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1968 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1969 vector path on AMD machines. */
1970 m_CORE2I7 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC64,
1972 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD machines. */
1974 m_CORE2I7 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC64,
1976 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR than a MOV. */
1980 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1981 but one byte longer. */
1984 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1985 operand that cannot be represented using a modRM byte. The XOR
1986 replacement is long decoded, so this split helps here as well. */
1989 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion from FP to FP. */
1991 m_CORE2I7 | m_AMDFAM10 | m_GENERIC,
1993 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1994 from integer to FP. */
1997 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1998 with a subsequent conditional jump instruction into a single
1999 compare-and-branch uop. */
2002 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
2003 will impact LEA instruction selection. */
2006 /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector instructions. */
2010 /* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching
2011 at -O3. For the moment, the prefetching seems badly tuned for Intel chips. */
2013 m_K6_GEODE | m_AMD_MULTIPLE,
2015 /* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
2016 the auto-vectorizer. */
2019 /* X86_TUNE_REASSOC_INT_TO_PARALLEL: Try to produce parallel computations
2020 during reassociation of integer computation. */
2023 /* X86_TUNE_REASSOC_FP_TO_PARALLEL: Try to produce parallel computations
2024 during reassociation of fp computation. */
2027 /* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE
2028 regs instead of memory. */
2029 m_COREI7 | m_CORE2I7
2032 /* Feature tests against the various architecture variations. */
2033 unsigned char ix86_arch_features[X86_ARCH_LAST];
2035 /* Feature tests against the various architecture variations, used to create
2036 ix86_arch_features based on the processor mask. */
2037 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
2038 /* X86_ARCH_CMOV: Conditional move was added for pentiumpro. */
2039 ~(m_386 | m_486 | m_PENT | m_K6),
2041 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
2044 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
2047 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
2050 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
2054 static const unsigned int x86_accumulate_outgoing_args
2055 = m_PPRO | m_P4_NOCONA | m_ATOM | m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC;
2057 static const unsigned int x86_arch_always_fancy_math_387
2058 = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC;
2060 static const unsigned int x86_avx256_split_unaligned_load
2061 = m_COREI7 | m_GENERIC;
2063 static const unsigned int x86_avx256_split_unaligned_store
2064 = m_COREI7 | m_BDVER | m_GENERIC;
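/* Editor's note: an illustrative sketch, not part of the original file.
   The standalone masks above are tested against the selected tuning the
   same way as the feature tables, e.g.:

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     if (x86_avx256_split_unaligned_load & ix86_tune_mask)
       target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;

   (the real code additionally respects flags the user set explicitly),
   so -mtune=corei7 turns on the AVX256 unaligned-load split by
   default.  */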
2066 /* In case the average insn count for single function invocation is
2067 lower than this constant, emit fast (but longer) prologue and epilogue code. */
2069 #define FAST_PROLOGUE_INSN_COUNT 20
2071 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
2072 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
2073 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
2074 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
2076 /* Array of the smallest class containing reg number REGNO, indexed by
2077 REGNO. Used by REGNO_REG_CLASS in i386.h. */
2079 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
2081 /* ax, dx, cx, bx */
2082 AREG, DREG, CREG, BREG,
2083 /* si, di, bp, sp */
2084 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
2086 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
2087 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
2090 /* flags, fpsr, fpcr, frame */
2091 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
2093 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2096 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
2099 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2100 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2101 /* SSE REX registers */
2102 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2106 /* The "default" register map used in 32bit mode. */
2108 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
2110 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
2111 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
2112 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2113 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
2114 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
2115 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2116 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2119 /* The "default" register map used in 64bit mode. */
2121 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
2123 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
2124 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
2125 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2126 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
2127 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
2128 8,9,10,11,12,13,14,15, /* extended integer registers */
2129 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
2132 /* Define the register numbers to be used in Dwarf debugging information.
2133 The SVR4 reference port C compiler uses the following register numbers
2134 in its Dwarf output code:
2135 0 for %eax (gcc regno = 0)
2136 1 for %ecx (gcc regno = 2)
2137 2 for %edx (gcc regno = 1)
2138 3 for %ebx (gcc regno = 3)
2139 4 for %esp (gcc regno = 7)
2140 5 for %ebp (gcc regno = 6)
2141 6 for %esi (gcc regno = 4)
2142 7 for %edi (gcc regno = 5)
2143 The following three DWARF register numbers are never generated by
2144 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
2145 believes these numbers have these meanings.
2146 8 for %eip (no gcc equivalent)
2147 9 for %eflags (gcc regno = 17)
2148 10 for %trapno (no gcc equivalent)
2149 It is not at all clear how we should number the FP stack registers
2150 for the x86 architecture. If the version of SDB on x86/svr4 were
2151 a bit less brain dead with respect to floating-point then we would
2152 have a precedent to follow with respect to DWARF register numbers
2153 for x86 FP registers, but the SDB on x86/svr4 is so completely
2154 broken with respect to FP registers that it is hardly worth thinking
2155 of it as something to strive for compatibility with.
2156 The version of x86/svr4 SDB I have at the moment does (partially)
2157 seem to believe that DWARF register number 11 is associated with
2158 the x86 register %st(0), but that's about all. Higher DWARF
2159 register numbers don't seem to be associated with anything in
2160 particular, and even for DWARF regno 11, SDB only seems to under-
2161 stand that it should say that a variable lives in %st(0) (when
2162 asked via an `=' command) if we said it was in DWARF regno 11,
2163 but SDB still prints garbage when asked for the value of the
2164 variable in question (via a `/' command).
2165 (Also note that the labels SDB prints for various FP stack regs
2166 when doing an `x' command are all wrong.)
2167 Note that these problems generally don't affect the native SVR4
2168 C compiler because it doesn't allow the use of -O with -g and
2169 because when it is *not* optimizing, it allocates a memory
2170 location for each floating-point variable, and the memory
2171 location is what gets described in the DWARF AT_location
2172 attribute for the variable in question.
2173 Regardless of the severe mental illness of the x86/svr4 SDB, we
2174 do something sensible here and we use the following DWARF
2175 register numbers. Note that these are all stack-top-relative numbers:
2177 11 for %st(0) (gcc regno = 8)
2178 12 for %st(1) (gcc regno = 9)
2179 13 for %st(2) (gcc regno = 10)
2180 14 for %st(3) (gcc regno = 11)
2181 15 for %st(4) (gcc regno = 12)
2182 16 for %st(5) (gcc regno = 13)
2183 17 for %st(6) (gcc regno = 14)
2184 18 for %st(7) (gcc regno = 15)
2186 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2188 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2189 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2190 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2191 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2192 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2193 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2194 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2197 /* Define parameter passing and return registers. */
2199 static int const x86_64_int_parameter_registers[6] =
2201 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2204 static int const x86_64_ms_abi_int_parameter_registers[4] =
2206 CX_REG, DX_REG, R8_REG, R9_REG
2209 static int const x86_64_int_return_registers[4] =
2211 AX_REG, DX_REG, DI_REG, SI_REG
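/* Editor's note: a worked example, not part of the original file.  For a
   call f (a, b, c, d) with integer arguments, the tables above yield:

     SysV ABI:  a in %rdi, b in %rsi, c in %rdx, d in %rcx
     MS ABI:    a in %rcx, b in %rdx, c in %r8,  d in %r9

   and an integer result comes back in %rax, with %rdx holding the
   second word of a two-register return.  */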
2214 /* Define the structure for the machine field in struct function. */
2216 struct GTY(()) stack_local_entry {
2217 unsigned short mode;
2220 struct stack_local_entry *next;
2223 /* Structure describing stack frame layout.
2224 Stack grows downward:
2230 saved static chain if ix86_static_chain_on_stack
2232 saved frame pointer if frame_pointer_needed
2233 <- HARD_FRAME_POINTER
2239 <- sse_regs_save_offset
2242 [va_arg registers] |
2246 [padding2] | = to_allocate
2255 int outgoing_arguments_size;
2257 /* The offsets relative to ARG_POINTER. */
2258 HOST_WIDE_INT frame_pointer_offset;
2259 HOST_WIDE_INT hard_frame_pointer_offset;
2260 HOST_WIDE_INT stack_pointer_offset;
2261 HOST_WIDE_INT hfp_save_offset;
2262 HOST_WIDE_INT reg_save_offset;
2263 HOST_WIDE_INT sse_reg_save_offset;
2265 /* When save_regs_using_mov is set, emit prologue using
2266 move instead of push instructions. */
2267 bool save_regs_using_mov;
2270 /* Which cpu are we scheduling for. */
2271 enum attr_cpu ix86_schedule;
2273 /* Which cpu are we optimizing for. */
2274 enum processor_type ix86_tune;
2276 /* Which instruction set architecture to use. */
2277 enum processor_type ix86_arch;
2279 /* True if processor has SSE prefetch instruction. */
2280 unsigned char x86_prefetch_sse;
2282 /* -mstackrealign option */
2283 static const char ix86_force_align_arg_pointer_string[]
2284 = "force_align_arg_pointer";
2286 static rtx (*ix86_gen_leave) (void);
2287 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2288 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2289 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2290 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2291 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2292 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2293 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2294 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2295 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
2296 static rtx (*ix86_gen_tls_global_dynamic_64) (rtx, rtx, rtx);
2297 static rtx (*ix86_gen_tls_local_dynamic_base_64) (rtx, rtx);
2299 /* Preferred alignment for stack boundary in bits. */
2300 unsigned int ix86_preferred_stack_boundary;
2302 /* Alignment for incoming stack boundary in bits specified at
2303 command line. */
2304 static unsigned int ix86_user_incoming_stack_boundary;
2306 /* Default alignment for incoming stack boundary in bits. */
2307 static unsigned int ix86_default_incoming_stack_boundary;
2309 /* Alignment for incoming stack boundary in bits. */
2310 unsigned int ix86_incoming_stack_boundary;
2312 /* Calling abi specific va_list type nodes. */
2313 static GTY(()) tree sysv_va_list_type_node;
2314 static GTY(()) tree ms_va_list_type_node;
2316 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2317 char internal_label_prefix[16];
2318 int internal_label_prefix_len;
2320 /* Fence to use after loop using movnt. */
2323 /* Register class used for passing given 64bit part of the argument.
2324 These represent classes as documented by the psABI, with the exception
2325 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
2326 just uses an SF or DFmode move instead of DImode to avoid reformatting penalties.
2328 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
2329 whenever possible (the upper half does contain padding). */
2330 enum x86_64_reg_class
2333 X86_64_INTEGER_CLASS,
2334 X86_64_INTEGERSI_CLASS,
2341 X86_64_COMPLEX_X87_CLASS,
2345 #define MAX_CLASSES 4
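/* Editor's note: a worked example, not part of the original file.  Under
   this classification an argument such as

     struct s { double d; int i; };   (16 bytes = two eightbytes)

   gets one class per 64-bit chunk: an SSE class for the double and
   X86_64_INTEGERSI_CLASS for the int plus its padding, so the struct is
   passed in one XMM register and one general register.  MAX_CLASSES is 4
   because nothing larger than four eightbytes (e.g. a 32-byte __m256) is
   ever classified; bigger aggregates are passed in memory.  */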
2347 /* Table of constants used by fldpi, fldln2, etc.... */
2348 static REAL_VALUE_TYPE ext_80387_constants_table[5];
2349 static bool ext_80387_constants_init = 0;
2352 static struct machine_function * ix86_init_machine_status (void);
2353 static rtx ix86_function_value (const_tree, const_tree, bool);
2354 static bool ix86_function_value_regno_p (const unsigned int);
2355 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2357 static rtx ix86_static_chain (const_tree, bool);
2358 static int ix86_function_regparm (const_tree, const_tree);
2359 static void ix86_compute_frame_layout (struct ix86_frame *);
2360 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2362 static void ix86_add_new_builtins (HOST_WIDE_INT);
2363 static tree ix86_canonical_va_list_type (tree);
2364 static void predict_jump (int);
2365 static unsigned int split_stack_prologue_scratch_regno (void);
2366 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2368 enum ix86_function_specific_strings
2370 IX86_FUNCTION_SPECIFIC_ARCH,
2371 IX86_FUNCTION_SPECIFIC_TUNE,
2372 IX86_FUNCTION_SPECIFIC_MAX
2375 static char *ix86_target_string (HOST_WIDE_INT, int, const char *,
2376 const char *, enum fpmath_unit, bool);
2377 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2378 static void ix86_function_specific_save (struct cl_target_option *);
2379 static void ix86_function_specific_restore (struct cl_target_option *);
2380 static void ix86_function_specific_print (FILE *, int,
2381 struct cl_target_option *);
2382 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2383 static bool ix86_valid_target_attribute_inner_p (tree, char *[],
2384 struct gcc_options *);
2385 static bool ix86_can_inline_p (tree, tree);
2386 static void ix86_set_current_function (tree);
2387 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2389 static enum calling_abi ix86_function_abi (const_tree);
2392 #ifndef SUBTARGET32_DEFAULT_CPU
2393 #define SUBTARGET32_DEFAULT_CPU "i386"
2396 /* The svr4 ABI for the i386 says that records and unions are returned in memory. */
2398 #ifndef DEFAULT_PCC_STRUCT_RETURN
2399 #define DEFAULT_PCC_STRUCT_RETURN 1
2402 /* Whether -mtune= or -march= were specified */
2403 static int ix86_tune_defaulted;
2404 static int ix86_arch_specified;
2406 /* Vectorization library interface and handlers. */
2407 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2409 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2410 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2412 /* Processor target table, indexed by processor number */
2415 const struct processor_costs *cost; /* Processor costs */
2416 const int align_loop; /* Default alignments. */
2417 const int align_loop_max_skip;
2418 const int align_jump;
2419 const int align_jump_max_skip;
2420 const int align_func;
2423 static const struct ptt processor_target_table[PROCESSOR_max] =
2425 {&i386_cost, 4, 3, 4, 3, 4},
2426 {&i486_cost, 16, 15, 16, 15, 16},
2427 {&pentium_cost, 16, 7, 16, 7, 16},
2428 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2429 {&geode_cost, 0, 0, 0, 0, 0},
2430 {&k6_cost, 32, 7, 32, 7, 32},
2431 {&athlon_cost, 16, 7, 16, 7, 16},
2432 {&pentium4_cost, 0, 0, 0, 0, 0},
2433 {&k8_cost, 16, 7, 16, 7, 16},
2434 {&nocona_cost, 0, 0, 0, 0, 0},
2436 {&core_cost, 16, 10, 16, 10, 16},
2438 {&core_cost, 16, 10, 16, 10, 16},
2439 {&generic32_cost, 16, 7, 16, 7, 16},
2440 {&generic64_cost, 16, 10, 16, 10, 16},
2441 {&amdfam10_cost, 32, 24, 32, 7, 32},
2442 {&bdver1_cost, 32, 24, 32, 7, 32},
2443 {&bdver2_cost, 32, 24, 32, 7, 32},
2444 {&bdver3_cost, 32, 24, 32, 7, 32},
2445 {&btver1_cost, 32, 24, 32, 7, 32},
2446 {&btver2_cost, 32, 24, 32, 7, 32},
2447 {&atom_cost, 16, 15, 16, 7, 16}
2450 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2483 gate_insert_vzeroupper (void)
2485 return TARGET_VZEROUPPER;
2489 rest_of_handle_insert_vzeroupper (void)
2493 /* vzeroupper instructions are inserted immediately after reload to
2494 account for possible spills from 256bit registers. The pass
2495 reuses mode switching infrastructure by re-running mode insertion
2496 pass, so disable entities that have already been processed. */
2497 for (i = 0; i < MAX_386_ENTITIES; i++)
2498 ix86_optimize_mode_switching[i] = 0;
2500 ix86_optimize_mode_switching[AVX_U128] = 1;
2502 /* Call optimize_mode_switching. */
2503 pass_mode_switching.pass.execute ();
2507 struct rtl_opt_pass pass_insert_vzeroupper =
2511 "vzeroupper", /* name */
2512 OPTGROUP_NONE, /* optinfo_flags */
2513 gate_insert_vzeroupper, /* gate */
2514 rest_of_handle_insert_vzeroupper, /* execute */
2517 0, /* static_pass_number */
2518 TV_NONE, /* tv_id */
2519 0, /* properties_required */
2520 0, /* properties_provided */
2521 0, /* properties_destroyed */
2522 0, /* todo_flags_start */
2523 TODO_df_finish | TODO_verify_rtl_sharing |
2524 0, /* todo_flags_finish */
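/* Editor's note: an illustrative sketch, not part of the original file.
   A pass descriptor like the one above is hooked into the pipeline from
   the target's option-override code, roughly:

     struct register_pass_info insert_vzeroupper_info
       = { &pass_insert_vzeroupper.pass, "reload",
	   1, PASS_POS_INSERT_AFTER };
     register_pass (&insert_vzeroupper_info);

   placing the vzeroupper insertion right after register allocation, in
   line with the comment in rest_of_handle_insert_vzeroupper.  The
   reference pass name ("reload") is quoted here as an assumption.  */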
2528 /* Return true if a red-zone is in use. */
2531 ix86_using_red_zone (void)
2533 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
2536 /* Return a string that documents the current -m options. The caller is
2537 responsible for freeing the string. */
2540 ix86_target_string (HOST_WIDE_INT isa, int flags, const char *arch,
2541 const char *tune, enum fpmath_unit fpmath,
2544 struct ix86_target_opts
2546 const char *option; /* option string */
2547 HOST_WIDE_INT mask; /* isa mask options */
2550 /* This table is ordered so that options like -msse4.2 that imply
2551 preceding options will match first. */
2552 static struct ix86_target_opts isa_opts[] =
2554 { "-mfma4", OPTION_MASK_ISA_FMA4
},
2555 { "-mfma", OPTION_MASK_ISA_FMA
},
2556 { "-mxop", OPTION_MASK_ISA_XOP
},
2557 { "-mlwp", OPTION_MASK_ISA_LWP
},
2558 { "-msse4a", OPTION_MASK_ISA_SSE4A
},
2559 { "-msse4.2", OPTION_MASK_ISA_SSE4_2
},
2560 { "-msse4.1", OPTION_MASK_ISA_SSE4_1
},
2561 { "-mssse3", OPTION_MASK_ISA_SSSE3
},
2562 { "-msse3", OPTION_MASK_ISA_SSE3
},
2563 { "-msse2", OPTION_MASK_ISA_SSE2
},
2564 { "-msse", OPTION_MASK_ISA_SSE
},
2565 { "-m3dnow", OPTION_MASK_ISA_3DNOW
},
2566 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A
},
2567 { "-mmmx", OPTION_MASK_ISA_MMX
},
2568 { "-mabm", OPTION_MASK_ISA_ABM
},
2569 { "-mbmi", OPTION_MASK_ISA_BMI
},
2570 { "-mbmi2", OPTION_MASK_ISA_BMI2
},
2571 { "-mlzcnt", OPTION_MASK_ISA_LZCNT
},
2572 { "-mhle", OPTION_MASK_ISA_HLE
},
2573 { "-mfxsr", OPTION_MASK_ISA_FXSR
},
2574 { "-mrdseed", OPTION_MASK_ISA_RDSEED
},
2575 { "-mprfchw", OPTION_MASK_ISA_PRFCHW
},
2576 { "-madx", OPTION_MASK_ISA_ADX
},
2577 { "-mtbm", OPTION_MASK_ISA_TBM
},
2578 { "-mpopcnt", OPTION_MASK_ISA_POPCNT
},
2579 { "-mmovbe", OPTION_MASK_ISA_MOVBE
},
2580 { "-mcrc32", OPTION_MASK_ISA_CRC32
},
2581 { "-maes", OPTION_MASK_ISA_AES
},
2582 { "-mpclmul", OPTION_MASK_ISA_PCLMUL
},
2583 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE
},
2584 { "-mrdrnd", OPTION_MASK_ISA_RDRND
},
2585 { "-mf16c", OPTION_MASK_ISA_F16C
},
2586 { "-mrtm", OPTION_MASK_ISA_RTM
},
2587 { "-mxsave", OPTION_MASK_ISA_XSAVE
},
2588 { "-mxsaveopt", OPTION_MASK_ISA_XSAVEOPT
},
2592 static struct ix86_target_opts flag_opts[] =
2594 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE
},
2595 { "-mlong-double-64", MASK_LONG_DOUBLE_64
},
2596 { "-m80387", MASK_80387
},
2597 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS
},
2598 { "-malign-double", MASK_ALIGN_DOUBLE
},
2599 { "-mcld", MASK_CLD
},
2600 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS
},
2601 { "-mieee-fp", MASK_IEEE_FP
},
2602 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS
},
2603 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY
},
2604 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT
},
2605 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS
},
2606 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387
},
2607 { "-mno-push-args", MASK_NO_PUSH_ARGS
},
2608 { "-mno-red-zone", MASK_NO_RED_ZONE
},
2609 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER
},
2610 { "-mrecip", MASK_RECIP
},
2611 { "-mrtd", MASK_RTD
},
2612 { "-msseregparm", MASK_SSEREGPARM
},
2613 { "-mstack-arg-probe", MASK_STACK_PROBE
},
2614 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS
},
2615 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS
},
2616 { "-m8bit-idiv", MASK_USE_8BIT_IDIV
},
2617 { "-mvzeroupper", MASK_VZEROUPPER
},
2618 { "-mavx256-split-unaligned-load", MASK_AVX256_SPLIT_UNALIGNED_LOAD
},
2619 { "-mavx256-split-unaligned-store", MASK_AVX256_SPLIT_UNALIGNED_STORE
},
2620 { "-mprefer-avx128", MASK_PREFER_AVX128
},
2623 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2626 char target_other[40];
2636 memset (opts, '\0', sizeof (opts));
2638 /* Add -march= option. */
2641 opts[num][0] = "-march=";
2642 opts[num++][1] = arch;
2645 /* Add -mtune= option. */
2648 opts[num][0] = "-mtune=";
2649 opts[num++][1] = tune;
2652 /* Add -m32/-m64/-mx32. */
2653 if ((isa & OPTION_MASK_ISA_64BIT) != 0)
2655 if ((isa & OPTION_MASK_ABI_64) != 0)
2659 isa &= ~ (OPTION_MASK_ISA_64BIT
2660 | OPTION_MASK_ABI_64
2661 | OPTION_MASK_ABI_X32);
2665 opts[num++][0] = abi;
2667 /* Pick out the options in isa options. */
2668 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2670 if ((isa & isa_opts[i].mask) != 0)
2672 opts[num++][0] = isa_opts[i].option;
2673 isa &= ~ isa_opts[i].mask;
2677 if (isa && add_nl_p)
2679 opts[num++][0] = isa_other;
2680 sprintf (isa_other, "(other isa: %#" HOST_WIDE_INT_PRINT "x)",
2684 /* Add flag options. */
2685 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2687 if ((flags & flag_opts[i].mask) != 0)
2689 opts[num++][0] = flag_opts[i].option;
2690 flags &= ~ flag_opts[i].mask;
2694 if (flags && add_nl_p)
2696 opts[num++][0] = target_other;
2697 sprintf (target_other, "(other flags: %#x)", flags);
2700 /* Add -fpmath= option. */
2703 opts[num][0] = "-mfpmath=";
2704 switch ((int) fpmath)
2707 opts[num++][1] = "387";
2711 opts[num++][1] = "sse";
2714 case FPMATH_387 | FPMATH_SSE:
2715 opts[num++][1] = "sse+387";
2727 gcc_assert (num < ARRAY_SIZE (opts));
2729 /* Size the string. */
2731 sep_len = (add_nl_p) ? 3 : 1;
2732 for (i = 0; i < num; i++)
2735 for (j = 0; j < 2; j++)
2737 len += strlen (opts[i][j]);
2740 /* Build the string. */
2741 ret = ptr = (char *) xmalloc (len);
2744 for (i = 0; i < num; i++)
2748 for (j = 0; j < 2; j++)
2749 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2756 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2764 for (j = 0; j < 2; j++)
2767 memcpy (ptr, opts[i][j], len2[j]);
2769 line_len += len2[j];
2774 gcc_assert (ret + len >= ptr);
2779 /* Return true, if profiling code should be emitted before
2780 prologue. Otherwise it returns false.
2781 Note: For x86 with "hotfix", the case is rejected with sorry (). */
2783 ix86_profile_before_prologue (void)
2785 return flag_fentry != 0;
2788 /* Function that is callable from the debugger to print the current options. */
2791 ix86_debug_options (void)
2793 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2794 ix86_arch_string, ix86_tune_string,
2799 fprintf (stderr, "%s\n\n", opts);
2803 fputs ("<no options>\n\n", stderr);
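/* Editor's note: a usage example, not part of the original file.  Because
   ix86_debug_options is kept callable (ATTRIBUTE_UNUSED), one can dump
   the effective -m options from a debugger session on cc1:

     (gdb) call ix86_debug_options ()

   which prints the string assembled by ix86_target_string above to
   stderr.  */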
2808 /* Override various settings based on options. If MAIN_ARGS_P, the
2809 options are from the command line, otherwise they are from attributes. */
2813 ix86_option_override_internal (bool main_args_p)
2816 unsigned int ix86_arch_mask, ix86_tune_mask;
2817 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2822 #define PTA_3DNOW (HOST_WIDE_INT_1 << 0)
2823 #define PTA_3DNOW_A (HOST_WIDE_INT_1 << 1)
2824 #define PTA_64BIT (HOST_WIDE_INT_1 << 2)
2825 #define PTA_ABM (HOST_WIDE_INT_1 << 3)
2826 #define PTA_AES (HOST_WIDE_INT_1 << 4)
2827 #define PTA_AVX (HOST_WIDE_INT_1 << 5)
2828 #define PTA_BMI (HOST_WIDE_INT_1 << 6)
2829 #define PTA_CX16 (HOST_WIDE_INT_1 << 7)
2830 #define PTA_F16C (HOST_WIDE_INT_1 << 8)
2831 #define PTA_FMA (HOST_WIDE_INT_1 << 9)
2832 #define PTA_FMA4 (HOST_WIDE_INT_1 << 10)
2833 #define PTA_FSGSBASE (HOST_WIDE_INT_1 << 11)
2834 #define PTA_LWP (HOST_WIDE_INT_1 << 12)
2835 #define PTA_LZCNT (HOST_WIDE_INT_1 << 13)
2836 #define PTA_MMX (HOST_WIDE_INT_1 << 14)
2837 #define PTA_MOVBE (HOST_WIDE_INT_1 << 15)
2838 #define PTA_NO_SAHF (HOST_WIDE_INT_1 << 16)
2839 #define PTA_PCLMUL (HOST_WIDE_INT_1 << 17)
2840 #define PTA_POPCNT (HOST_WIDE_INT_1 << 18)
2841 #define PTA_PREFETCH_SSE (HOST_WIDE_INT_1 << 19)
2842 #define PTA_RDRND (HOST_WIDE_INT_1 << 20)
2843 #define PTA_SSE (HOST_WIDE_INT_1 << 21)
2844 #define PTA_SSE2 (HOST_WIDE_INT_1 << 22)
2845 #define PTA_SSE3 (HOST_WIDE_INT_1 << 23)
2846 #define PTA_SSE4_1 (HOST_WIDE_INT_1 << 24)
2847 #define PTA_SSE4_2 (HOST_WIDE_INT_1 << 25)
2848 #define PTA_SSE4A (HOST_WIDE_INT_1 << 26)
2849 #define PTA_SSSE3 (HOST_WIDE_INT_1 << 27)
2850 #define PTA_TBM (HOST_WIDE_INT_1 << 28)
2851 #define PTA_XOP (HOST_WIDE_INT_1 << 29)
2852 #define PTA_AVX2 (HOST_WIDE_INT_1 << 30)
2853 #define PTA_BMI2 (HOST_WIDE_INT_1 << 31)
2854 #define PTA_RTM (HOST_WIDE_INT_1 << 32)
2855 #define PTA_HLE (HOST_WIDE_INT_1 << 33)
2856 #define PTA_PRFCHW (HOST_WIDE_INT_1 << 34)
2857 #define PTA_RDSEED (HOST_WIDE_INT_1 << 35)
2858 #define PTA_ADX (HOST_WIDE_INT_1 << 36)
2859 #define PTA_FXSR (HOST_WIDE_INT_1 << 37)
2860 #define PTA_XSAVE (HOST_WIDE_INT_1 << 38)
2861 #define PTA_XSAVEOPT (HOST_WIDE_INT_1 << 39)
2863 /* if this reaches 64, need to widen struct pta flags below */
2867 const char *const name; /* processor name or nickname. */
2868 const enum processor_type processor;
2869 const enum attr_cpu schedule;
2870 const unsigned HOST_WIDE_INT flags;
2872 const processor_alias_table[] =
2874 {"i386", PROCESSOR_I386
, CPU_NONE
, 0},
2875 {"i486", PROCESSOR_I486
, CPU_NONE
, 0},
2876 {"i586", PROCESSOR_PENTIUM
, CPU_PENTIUM
, 0},
2877 {"pentium", PROCESSOR_PENTIUM
, CPU_PENTIUM
, 0},
2878 {"pentium-mmx", PROCESSOR_PENTIUM
, CPU_PENTIUM
, PTA_MMX
},
2879 {"winchip-c6", PROCESSOR_I486
, CPU_NONE
, PTA_MMX
},
2880 {"winchip2", PROCESSOR_I486
, CPU_NONE
, PTA_MMX
| PTA_3DNOW
},
2881 {"c3", PROCESSOR_I486
, CPU_NONE
, PTA_MMX
| PTA_3DNOW
},
2882 {"c3-2", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, PTA_MMX
| PTA_SSE
},
2883 {"i686", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, 0},
2884 {"pentiumpro", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, 0},
2885 {"pentium2", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
, PTA_MMX
| PTA_FXSR
},
2886 {"pentium3", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
,
2887 PTA_MMX
| PTA_SSE
| PTA_FXSR
},
2888 {"pentium3m", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
,
2889 PTA_MMX
| PTA_SSE
| PTA_FXSR
},
2890 {"pentium-m", PROCESSOR_PENTIUMPRO
, CPU_PENTIUMPRO
,
2891 PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_FXSR
},
2892 {"pentium4", PROCESSOR_PENTIUM4
, CPU_NONE
,
2893 PTA_MMX
|PTA_SSE
| PTA_SSE2
| PTA_FXSR
},
2894 {"pentium4m", PROCESSOR_PENTIUM4
, CPU_NONE
,
2895 PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_FXSR
},
2896 {"prescott", PROCESSOR_NOCONA
, CPU_NONE
,
2897 PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
| PTA_FXSR
},
2898 {"nocona", PROCESSOR_NOCONA
, CPU_NONE
,
2899 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2900 | PTA_CX16
| PTA_NO_SAHF
| PTA_FXSR
},
2901 {"core2", PROCESSOR_CORE2
, CPU_CORE2
,
2902 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2903 | PTA_SSSE3
| PTA_CX16
| PTA_FXSR
},
2904 {"corei7", PROCESSOR_COREI7
, CPU_COREI7
,
2905 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2906 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_CX16
| PTA_FXSR
},
2907 {"corei7-avx", PROCESSOR_COREI7
, CPU_COREI7
,
2908 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2909 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_AVX
2910 | PTA_CX16
| PTA_POPCNT
| PTA_AES
| PTA_PCLMUL
2911 | PTA_FXSR
| PTA_XSAVE
| PTA_XSAVEOPT
},
2912 {"core-avx-i", PROCESSOR_COREI7
, CPU_COREI7
,
2913 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2914 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_AVX
2915 | PTA_CX16
| PTA_POPCNT
| PTA_AES
| PTA_PCLMUL
| PTA_FSGSBASE
2916 | PTA_RDRND
| PTA_F16C
| PTA_FXSR
| PTA_XSAVE
| PTA_XSAVEOPT
},
2917 {"core-avx2", PROCESSOR_COREI7
, CPU_COREI7
,
2918 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2919 | PTA_SSSE3
| PTA_SSE4_1
| PTA_SSE4_2
| PTA_AVX
| PTA_AVX2
2920 | PTA_CX16
| PTA_POPCNT
| PTA_AES
| PTA_PCLMUL
| PTA_FSGSBASE
2921 | PTA_RDRND
| PTA_F16C
| PTA_BMI
| PTA_BMI2
| PTA_LZCNT
2922 | PTA_FMA
| PTA_MOVBE
| PTA_RTM
| PTA_HLE
| PTA_FXSR
| PTA_XSAVE
2924 {"atom", PROCESSOR_ATOM
, CPU_ATOM
,
2925 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2926 | PTA_SSSE3
| PTA_CX16
| PTA_MOVBE
| PTA_FXSR
},
2927 {"geode", PROCESSOR_GEODE
, CPU_GEODE
,
2928 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_PREFETCH_SSE
},
2929 {"k6", PROCESSOR_K6
, CPU_K6
, PTA_MMX
},
2930 {"k6-2", PROCESSOR_K6
, CPU_K6
, PTA_MMX
| PTA_3DNOW
},
2931 {"k6-3", PROCESSOR_K6
, CPU_K6
, PTA_MMX
| PTA_3DNOW
},
2932 {"athlon", PROCESSOR_ATHLON
, CPU_ATHLON
,
2933 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_PREFETCH_SSE
},
2934 {"athlon-tbird", PROCESSOR_ATHLON
, CPU_ATHLON
,
2935 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_PREFETCH_SSE
},
2936 {"athlon-4", PROCESSOR_ATHLON
, CPU_ATHLON
,
2937 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
},
2938 {"athlon-xp", PROCESSOR_ATHLON
, CPU_ATHLON
,
2939 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
},
2940 {"athlon-mp", PROCESSOR_ATHLON
, CPU_ATHLON
,
2941 PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
},
2942 {"x86-64", PROCESSOR_K8
, CPU_K8
,
2943 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_NO_SAHF
},
2944 {"k8", PROCESSOR_K8
, CPU_K8
,
2945 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2946 | PTA_SSE2
| PTA_NO_SAHF
},
2947 {"k8-sse3", PROCESSOR_K8
, CPU_K8
,
2948 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2949 | PTA_SSE2
| PTA_SSE3
| PTA_NO_SAHF
},
2950 {"opteron", PROCESSOR_K8
, CPU_K8
,
2951 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2952 | PTA_SSE2
| PTA_NO_SAHF
},
2953 {"opteron-sse3", PROCESSOR_K8
, CPU_K8
,
2954 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2955 | PTA_SSE2
| PTA_SSE3
| PTA_NO_SAHF
},
2956 {"athlon64", PROCESSOR_K8
, CPU_K8
,
2957 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2958 | PTA_SSE2
| PTA_NO_SAHF
},
2959 {"athlon64-sse3", PROCESSOR_K8
, CPU_K8
,
2960 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2961 | PTA_SSE2
| PTA_SSE3
| PTA_NO_SAHF
},
2962 {"athlon-fx", PROCESSOR_K8
, CPU_K8
,
2963 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2964 | PTA_SSE2
| PTA_NO_SAHF
},
2965 {"amdfam10", PROCESSOR_AMDFAM10
, CPU_AMDFAM10
,
2966 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2967 | PTA_SSE2
| PTA_SSE3
| PTA_SSE4A
| PTA_CX16
| PTA_ABM
},
2968 {"barcelona", PROCESSOR_AMDFAM10
, CPU_AMDFAM10
,
2969 PTA_64BIT
| PTA_MMX
| PTA_3DNOW
| PTA_3DNOW_A
| PTA_SSE
2970 | PTA_SSE2
| PTA_SSE3
| PTA_SSE4A
| PTA_CX16
| PTA_ABM
},
2971 {"bdver1", PROCESSOR_BDVER1
, CPU_BDVER1
,
2972 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2973 | PTA_SSE4A
| PTA_CX16
| PTA_ABM
| PTA_SSSE3
| PTA_SSE4_1
2974 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
| PTA_FMA4
2975 | PTA_XOP
| PTA_LWP
| PTA_PRFCHW
| PTA_FXSR
| PTA_XSAVE
},
2976 {"bdver2", PROCESSOR_BDVER2
, CPU_BDVER2
,
2977 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2978 | PTA_SSE4A
| PTA_CX16
| PTA_ABM
| PTA_SSSE3
| PTA_SSE4_1
2979 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
| PTA_FMA4
2980 | PTA_XOP
| PTA_LWP
| PTA_BMI
| PTA_TBM
| PTA_F16C
2981 | PTA_FMA
| PTA_PRFCHW
| PTA_FXSR
| PTA_XSAVE
},
2982 {"bdver3", PROCESSOR_BDVER3
, CPU_BDVER3
,
2983 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2984 | PTA_SSE4A
| PTA_CX16
| PTA_ABM
| PTA_SSSE3
| PTA_SSE4_1
2985 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
2986 | PTA_XOP
| PTA_LWP
| PTA_BMI
| PTA_TBM
| PTA_F16C
2987 | PTA_FMA
| PTA_PRFCHW
| PTA_FXSR
| PTA_XSAVE
2989 {"btver1", PROCESSOR_BTVER1
, CPU_GENERIC64
,
2990 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2991 | PTA_SSSE3
| PTA_SSE4A
|PTA_ABM
| PTA_CX16
| PTA_PRFCHW
2992 | PTA_FXSR
| PTA_XSAVE
},
2993 {"btver2", PROCESSOR_BTVER2
, CPU_GENERIC64
,
2994 PTA_64BIT
| PTA_MMX
| PTA_SSE
| PTA_SSE2
| PTA_SSE3
2995 | PTA_SSSE3
| PTA_SSE4A
|PTA_ABM
| PTA_CX16
| PTA_SSE4_1
2996 | PTA_SSE4_2
| PTA_AES
| PTA_PCLMUL
| PTA_AVX
2997 | PTA_BMI
| PTA_F16C
| PTA_MOVBE
| PTA_PRFCHW
2998 | PTA_FXSR
| PTA_XSAVE
| PTA_XSAVEOPT
},
3000 {"generic32", PROCESSOR_GENERIC32
, CPU_PENTIUMPRO
,
3001 PTA_HLE
/* flags are only used for -march switch. */ },
3002 {"generic64", PROCESSOR_GENERIC64
, CPU_GENERIC64
,
3004 | PTA_HLE
/* flags are only used for -march switch. */ },
3007 /* -mrecip options. */
3010 const char *string; /* option name */
3011 unsigned int mask; /* mask bits to set */
3013 const recip_options[] =
3015 { "all", RECIP_MASK_ALL },
3016 { "none", RECIP_MASK_NONE },
3017 { "div", RECIP_MASK_DIV },
3018 { "sqrt", RECIP_MASK_SQRT },
3019 { "vec-div", RECIP_MASK_VEC_DIV },
3020 { "vec-sqrt", RECIP_MASK_VEC_SQRT },
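/* Editor's note: a usage example, not part of the original file.  The
   table above backs -mrecip option strings such as:

     -mrecip=vec-div,vec-sqrt   sets RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT
     -mrecip=all,!sqrt          everything except the scalar sqrt bit

   where a leading `!' on an entry clears the named mask bits instead of
   setting them (behaviour as documented for -mrecip=opt).  */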
3023 int const pta_size = ARRAY_SIZE (processor_alias_table);
3025 /* Set up prefix/suffix so the error messages refer to either the command
3026 line argument, or the attribute(target). */
3035 prefix = "option(\"";
3040 /* Turn off both OPTION_MASK_ABI_64 and OPTION_MASK_ABI_X32 if
3041 TARGET_64BIT_DEFAULT is true and TARGET_64BIT is false. */
3042 if (TARGET_64BIT_DEFAULT && !TARGET_64BIT)
3043 ix86_isa_flags &= ~(OPTION_MASK_ABI_64 | OPTION_MASK_ABI_X32);
3044 #ifdef TARGET_BI_ARCH
3047 #if TARGET_BI_ARCH == 1
3048 /* When TARGET_BI_ARCH == 1, by default, OPTION_MASK_ABI_64
3049 is on and OPTION_MASK_ABI_X32 is off. We turn off
3050 OPTION_MASK_ABI_64 if OPTION_MASK_ABI_X32 is turned on by
3053 ix86_isa_flags &= ~OPTION_MASK_ABI_64;
3055 /* When TARGET_BI_ARCH == 2, by default, OPTION_MASK_ABI_X32 is
3056 on and OPTION_MASK_ABI_64 is off. We turn off
3057 OPTION_MASK_ABI_X32 if OPTION_MASK_ABI_64 is turned on by
3060 ix86_isa_flags &= ~OPTION_MASK_ABI_X32;
3067 /* Always turn on OPTION_MASK_ISA_64BIT and turn off
3068 OPTION_MASK_ABI_64 for TARGET_X32. */
3069 ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
3070 ix86_isa_flags &= ~OPTION_MASK_ABI_64;
3072 else if (TARGET_LP64)
3074 /* Always turn on OPTION_MASK_ISA_64BIT and turn off
3075 OPTION_MASK_ABI_X32 for TARGET_LP64. */
3076 ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
3077 ix86_isa_flags &= ~OPTION_MASK_ABI_X32;
3080 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3081 SUBTARGET_OVERRIDE_OPTIONS;
3084 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3085 SUBSUBTARGET_OVERRIDE_OPTIONS;
3088 /* -fPIC is the default for x86_64. */
3089 if (TARGET_MACHO && TARGET_64BIT)
3092 /* Need to check -mtune=generic first. */
3093 if (ix86_tune_string)
3095 if (!strcmp (ix86_tune_string, "generic")
3096 || !strcmp (ix86_tune_string, "i686")
3097 /* As special support for cross compilers we read -mtune=native
3098 as -mtune=generic. With native compilers we won't see the
3099 -mtune=native, as it was changed by the driver. */
3100 || !strcmp (ix86_tune_string, "native"))
3103 ix86_tune_string = "generic64";
3105 ix86_tune_string = "generic32";
3107 /* If this call is for setting the option attribute, allow the
3108 generic32/generic64 that was previously set. */
3109 else if (!main_args_p
3110 && (!strcmp (ix86_tune_string, "generic32")
3111 || !strcmp (ix86_tune_string, "generic64")))
3113 else if (!strncmp (ix86_tune_string, "generic", 7))
3114 error ("bad value (%s) for %stune=%s %s",
3115 ix86_tune_string, prefix, suffix, sw);
3116 else if (!strcmp (ix86_tune_string, "x86-64"))
3117 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3118 "%stune=k8%s or %stune=generic%s instead as appropriate",
3119 prefix, suffix, prefix, suffix, prefix, suffix);
3123 if (ix86_arch_string)
3124 ix86_tune_string = ix86_arch_string;
3125 if (!ix86_tune_string)
3127 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3128 ix86_tune_defaulted = 1;
3131 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3132 need to use a sensible tune option. */
3133 if (!strcmp (ix86_tune_string, "generic")
3134 || !strcmp (ix86_tune_string, "x86-64")
3135 || !strcmp (ix86_tune_string, "i686"))
3138 ix86_tune_string = "generic64";
3140 ix86_tune_string = "generic32";
3144 if (ix86_stringop_alg == rep_prefix_8_byte && !TARGET_64BIT)
3146 /* rep; movq isn't available in 32-bit code. */
3147 error ("-mstringop-strategy=rep_8byte not supported for 32-bit code");
3148 ix86_stringop_alg = no_stringop;
3151 if (!ix86_arch_string
)
3152 ix86_arch_string
= TARGET_64BIT
? "x86-64" : SUBTARGET32_DEFAULT_CPU
;
3154 ix86_arch_specified
= 1;
3156   if (global_options_set.x_ix86_pmode)
3158       if ((TARGET_LP64 && ix86_pmode == PMODE_SI)
3159           || (!TARGET_64BIT && ix86_pmode == PMODE_DI))
3160         error ("address mode %qs not supported in the %s bit mode",
3161                TARGET_64BIT ? "short" : "long",
3162                TARGET_64BIT ? "64" : "32");
3165     ix86_pmode = TARGET_LP64 ? PMODE_DI : PMODE_SI;
3167   if (!global_options_set.x_ix86_abi)
3168     ix86_abi = DEFAULT_ABI;
3170   if (global_options_set.x_ix86_cmodel)
3172       switch (ix86_cmodel)
3177             ix86_cmodel = CM_SMALL_PIC;
3179             error ("code model %qs not supported in the %s bit mode",
3186             ix86_cmodel = CM_MEDIUM_PIC;
3188             error ("code model %qs not supported in the %s bit mode",
3190           else if (TARGET_X32)
3191             error ("code model %qs not supported in x32 mode",
3198             ix86_cmodel = CM_LARGE_PIC;
3200             error ("code model %qs not supported in the %s bit mode",
3202           else if (TARGET_X32)
3203             error ("code model %qs not supported in x32 mode",
3209             error ("code model %s does not support PIC mode", "32");
3211             error ("code model %qs not supported in the %s bit mode",
3218             error ("code model %s does not support PIC mode", "kernel");
3219           ix86_cmodel = CM_32;
3222             error ("code model %qs not supported in the %s bit mode",
3232   /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3233      use of rip-relative addressing.  This eliminates fixups that
3234      would otherwise be needed if this object is to be placed in a
3235      DLL, and is essentially just as efficient as direct addressing.  */
3236   else if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3237     ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3238   else if (TARGET_64BIT)
3239     ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3241     ix86_cmodel = CM_32;
3243   if (TARGET_MACHO && ix86_asm_dialect == ASM_INTEL)
3245       error ("-masm=intel not supported in this configuration");
3246       ix86_asm_dialect = ASM_ATT;
3248   if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3249     sorry ("%i-bit mode not compiled in",
3250            (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3252   for (i = 0; i < pta_size; i++)
3253     if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3255         ix86_schedule = processor_alias_table[i].schedule;
3256         ix86_arch = processor_alias_table[i].processor;
3257         /* Default cpu tuning to the architecture.  */
3258         ix86_tune = ix86_arch;
3260         if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3261           error ("CPU you selected does not support x86-64 " "instruction set");
3264         if (processor_alias_table[i].flags & PTA_MMX
3265             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3266           ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3267         if (processor_alias_table[i].flags & PTA_3DNOW
3268             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3269           ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3270         if (processor_alias_table[i].flags & PTA_3DNOW_A
3271             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3272           ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3273         if (processor_alias_table[i].flags & PTA_SSE
3274             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3275           ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3276         if (processor_alias_table[i].flags & PTA_SSE2
3277             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3278           ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3279         if (processor_alias_table[i].flags & PTA_SSE3
3280             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3281           ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3282         if (processor_alias_table[i].flags & PTA_SSSE3
3283             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3284           ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3285         if (processor_alias_table[i].flags & PTA_SSE4_1
3286             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3287           ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3288         if (processor_alias_table[i].flags & PTA_SSE4_2
3289             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3290           ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3291         if (processor_alias_table[i].flags & PTA_AVX
3292             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3293           ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3294         if (processor_alias_table[i].flags & PTA_AVX2
3295             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX2))
3296           ix86_isa_flags |= OPTION_MASK_ISA_AVX2;
3297         if (processor_alias_table[i].flags & PTA_FMA
3298             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3299           ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3300         if (processor_alias_table[i].flags & PTA_SSE4A
3301             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3302           ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3303         if (processor_alias_table[i].flags & PTA_FMA4
3304             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3305           ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3306         if (processor_alias_table[i].flags & PTA_XOP
3307             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3308           ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3309         if (processor_alias_table[i].flags & PTA_LWP
3310             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3311           ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3312         if (processor_alias_table[i].flags & PTA_ABM
3313             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3314           ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3315         if (processor_alias_table[i].flags & PTA_BMI
3316             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3317           ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3318         if (processor_alias_table[i].flags & (PTA_LZCNT | PTA_ABM)
3319             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LZCNT))
3320           ix86_isa_flags |= OPTION_MASK_ISA_LZCNT;
3321         if (processor_alias_table[i].flags & PTA_TBM
3322             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3323           ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3324         if (processor_alias_table[i].flags & PTA_BMI2
3325             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI2))
3326           ix86_isa_flags |= OPTION_MASK_ISA_BMI2;
3327         if (processor_alias_table[i].flags & PTA_CX16
3328             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3329           ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3330         if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3331             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3332           ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3333         if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3334             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3335           ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3336         if (processor_alias_table[i].flags & PTA_MOVBE
3337             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3338           ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3339         if (processor_alias_table[i].flags & PTA_AES
3340             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3341           ix86_isa_flags |= OPTION_MASK_ISA_AES;
3342         if (processor_alias_table[i].flags & PTA_PCLMUL
3343             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3344           ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3345         if (processor_alias_table[i].flags & PTA_FSGSBASE
3346             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3347           ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3348         if (processor_alias_table[i].flags & PTA_RDRND
3349             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3350           ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3351         if (processor_alias_table[i].flags & PTA_F16C
3352             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3353           ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3354         if (processor_alias_table[i].flags & PTA_RTM
3355             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RTM))
3356           ix86_isa_flags |= OPTION_MASK_ISA_RTM;
3357         if (processor_alias_table[i].flags & PTA_HLE
3358             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_HLE))
3359           ix86_isa_flags |= OPTION_MASK_ISA_HLE;
3360         if (processor_alias_table[i].flags & PTA_PRFCHW
3361             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PRFCHW))
3362           ix86_isa_flags |= OPTION_MASK_ISA_PRFCHW;
3363         if (processor_alias_table[i].flags & PTA_RDSEED
3364             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDSEED))
3365           ix86_isa_flags |= OPTION_MASK_ISA_RDSEED;
3366         if (processor_alias_table[i].flags & PTA_ADX
3367             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ADX))
3368           ix86_isa_flags |= OPTION_MASK_ISA_ADX;
3369         if (processor_alias_table[i].flags & PTA_FXSR
3370             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FXSR))
3371           ix86_isa_flags |= OPTION_MASK_ISA_FXSR;
3372         if (processor_alias_table[i].flags & PTA_XSAVE
3373             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVE))
3374           ix86_isa_flags |= OPTION_MASK_ISA_XSAVE;
3375         if (processor_alias_table[i].flags & PTA_XSAVEOPT
3376             && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVEOPT))
3377           ix86_isa_flags |= OPTION_MASK_ISA_XSAVEOPT;
3378         if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3379           x86_prefetch_sse = true;
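/* [Editor's note.]  Every test in the block above follows one pattern:
   turn on the ISA bit implied by the selected -march entry unless the
   user set that bit explicitly.  A hypothetical helper macro capturing
   the idiom (sketch only, not part of the original source):  */
#if 0
#define IMPLY_ISA_FROM_ARCH(PTA_BIT, ISA_MASK)			\
  do {								\
    if ((processor_alias_table[i].flags & (PTA_BIT))		\
	&& !(ix86_isa_flags_explicit & (ISA_MASK)))		\
      ix86_isa_flags |= (ISA_MASK);				\
  } while (0)
#endif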
3384   if (!strcmp (ix86_arch_string, "generic"))
3385     error ("generic CPU can be used only for %stune=%s %s",
3386            prefix, suffix, sw);
3387   else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3388     error ("bad value (%s) for %sarch=%s %s",
3389            ix86_arch_string, prefix, suffix, sw);
3391   ix86_arch_mask = 1u << ix86_arch;
3392   for (i = 0; i < X86_ARCH_LAST; ++i)
3393     ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3395   for (i = 0; i < pta_size; i++)
3396     if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3398         ix86_schedule = processor_alias_table[i].schedule;
3399         ix86_tune = processor_alias_table[i].processor;
3402             if (!(processor_alias_table[i].flags & PTA_64BIT))
3404                 if (ix86_tune_defaulted)
3406                     ix86_tune_string = "x86-64";
3407                     for (i = 0; i < pta_size; i++)
3408                       if (! strcmp (ix86_tune_string,
3409                                     processor_alias_table[i].name))
3411                     ix86_schedule = processor_alias_table[i].schedule;
3412                     ix86_tune = processor_alias_table[i].processor;
3415                   error ("CPU you selected does not support x86-64 " "instruction set");
3421         /* Adjust tuning when compiling for 32-bit ABI.  */
3424               case PROCESSOR_GENERIC64:
3425                 ix86_tune = PROCESSOR_GENERIC32;
3426                 ix86_schedule = CPU_PENTIUMPRO;
3433         /* Intel CPUs have always interpreted SSE prefetch instructions as
3434            NOPs; so, we can enable SSE prefetch instructions even when
3435            -mtune (rather than -march) points us to a processor that has them.
3436            However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3437            higher processors.  */
3439             && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3440           x86_prefetch_sse = true;
3444   if (ix86_tune_specified && i == pta_size)
3445     error ("bad value (%s) for %stune=%s %s",
3446            ix86_tune_string, prefix, suffix, sw);
3448   ix86_tune_mask = 1u << ix86_tune;
3449   for (i = 0; i < X86_TUNE_LAST; ++i)
3450     ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3452 #ifndef USE_IX86_FRAME_POINTER
3453 #define USE_IX86_FRAME_POINTER 0
3456 #ifndef USE_X86_64_FRAME_POINTER
3457 #define USE_X86_64_FRAME_POINTER 0
3460   /* Set the default values for switches whose default depends on TARGET_64BIT
3461      in case they weren't overwritten by command line options.  */
3464       if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3465         flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3466       if (flag_asynchronous_unwind_tables == 2)
3467         flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3468       if (flag_pcc_struct_return == 2)
3469         flag_pcc_struct_return = 0;
3473       if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3474         flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3475       if (flag_asynchronous_unwind_tables == 2)
3476         flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3477       if (flag_pcc_struct_return == 2)
3478         flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3481   ix86_tune_cost = processor_target_table[ix86_tune].cost;
3483     ix86_cost = &ix86_size_cost;
3485     ix86_cost = ix86_tune_cost;
3487   /* Arrange to set up i386_stack_locals for all functions.  */
3488   init_machine_status = ix86_init_machine_status;
3490   /* Validate -mregparm= value.  */
3491   if (global_options_set.x_ix86_regparm)
3494         warning (0, "-mregparm is ignored in 64-bit mode");
3495       if (ix86_regparm > REGPARM_MAX)
3497           error ("-mregparm=%d is not between 0 and %d",
3498                  ix86_regparm, REGPARM_MAX);
3503     ix86_regparm = REGPARM_MAX;
3505   /* Default align_* from the processor table.  */
3506   if (align_loops == 0)
3508       align_loops = processor_target_table[ix86_tune].align_loop;
3509       align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3511   if (align_jumps == 0)
3513       align_jumps = processor_target_table[ix86_tune].align_jump;
3514       align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3516   if (align_functions == 0)
3518       align_functions = processor_target_table[ix86_tune].align_func;
3521   /* Provide default for -mbranch-cost= value.  */
3522   if (!global_options_set.x_ix86_branch_cost)
3523     ix86_branch_cost = ix86_cost->branch_cost;
3527       target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3529       /* Enable by default the SSE and MMX builtins.  Do allow the user to
3530          explicitly disable any of these.  In particular, disabling SSE and
3531          MMX for kernel code is extremely useful.  */
3532       if (!ix86_arch_specified)
3534           |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3535                | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3538         warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3542       target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3544       if (!ix86_arch_specified)
3546           |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3548   /* The i386 ABI does not specify a red zone.  It still makes sense to use it
3549      when the programmer takes care to keep the stack from being destroyed.  */
3550   if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3551     target_flags |= MASK_NO_RED_ZONE;
3554   /* Keep nonleaf frame pointers.  */
3555   if (flag_omit_frame_pointer)
3556     target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3557   else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3558     flag_omit_frame_pointer = 1;
3560   /* If we're doing fast math, we don't care about comparison order
3561      wrt NaNs.  This lets us use a shorter comparison sequence.  */
3562   if (flag_finite_math_only)
3563     target_flags &= ~MASK_IEEE_FP;
3565   /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3566      since the insns won't need emulation.  */
3567   if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3568     target_flags &= ~MASK_NO_FANCY_MATH_387;
3570   /* Likewise, if the target doesn't have a 387, or we've specified
3571      software floating point, don't use 387 inline intrinsics.  */
3573     target_flags |= MASK_NO_FANCY_MATH_387;
3575   /* Turn on MMX builtins for -msse.  */
3577     ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3579   /* Enable SSE prefetch.  */
3580   if (TARGET_SSE || TARGET_PRFCHW)
3581     x86_prefetch_sse = true;
3583   /* Turn on popcnt instruction for -msse4.2 or -mabm.  */
3584   if (TARGET_SSE4_2 || TARGET_ABM)
3585     ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3587   /* Turn on lzcnt instruction for -mabm.  */
3589     ix86_isa_flags |= OPTION_MASK_ISA_LZCNT & ~ix86_isa_flags_explicit;
3591   /* Validate -mpreferred-stack-boundary= value or default it to
3592      PREFERRED_STACK_BOUNDARY_DEFAULT.  */
3593   ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3594   if (global_options_set.x_ix86_preferred_stack_boundary_arg)
3596       int min = (TARGET_64BIT ? (TARGET_SSE ? 4 : 3) : 2);
3597       int max = (TARGET_SEH ? 4 : 12);
3599       if (ix86_preferred_stack_boundary_arg < min
3600           || ix86_preferred_stack_boundary_arg > max)
3603             error ("-mpreferred-stack-boundary is not supported " "for this target");
3606             error ("-mpreferred-stack-boundary=%d is not between %d and %d",
3607                    ix86_preferred_stack_boundary_arg, min, max);
3610         ix86_preferred_stack_boundary
3611           = (1 << ix86_preferred_stack_boundary_arg) * BITS_PER_UNIT;
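/* [Editor's illustration -- not from the original source.]  The option
   argument is a log2 byte count converted to bits:
   -mpreferred-stack-boundary=4 gives (1 << 4) * 8 = 128 bits, i.e. the
   16-byte stack alignment that the 64-bit SysV ABI requires.  */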
3614   /* Set the default value for -mstackrealign.  */
3615   if (ix86_force_align_arg_pointer == -1)
3616     ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3618   ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3620   /* Validate -mincoming-stack-boundary= value or default it to
3621      MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY.  */
3622   ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3623   if (global_options_set.x_ix86_incoming_stack_boundary_arg)
3625       if (ix86_incoming_stack_boundary_arg < (TARGET_64BIT ? 4 : 2)
3626           || ix86_incoming_stack_boundary_arg > 12)
3627         error ("-mincoming-stack-boundary=%d is not between %d and 12",
3628                ix86_incoming_stack_boundary_arg, TARGET_64BIT ? 4 : 2);
3631           ix86_user_incoming_stack_boundary
3632             = (1 << ix86_incoming_stack_boundary_arg) * BITS_PER_UNIT;
3633           ix86_incoming_stack_boundary
3634             = ix86_user_incoming_stack_boundary;
3638   /* Accept -msseregparm only if at least SSE support is enabled.  */
3639   if (TARGET_SSEREGPARM
3641     error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3643   if (global_options_set.x_ix86_fpmath)
3645       if (ix86_fpmath & FPMATH_SSE)
3649           warning (0, "SSE instruction set disabled, using 387 arithmetics");
3650           ix86_fpmath = FPMATH_387;
3652       else if ((ix86_fpmath & FPMATH_387) && !TARGET_80387)
3654           warning (0, "387 instruction set disabled, using SSE arithmetics");
3655           ix86_fpmath = FPMATH_SSE;
3660     ix86_fpmath = TARGET_FPMATH_DEFAULT;
3662   /* If the i387 is disabled, then do not return values in it.  */
3664     target_flags &= ~MASK_FLOAT_RETURNS;
3666   /* Use external vectorized library in vectorizing intrinsics.  */
3667   if (global_options_set.x_ix86_veclibabi_type)
3668     switch (ix86_veclibabi_type)
3670       case ix86_veclibabi_type_svml:
3671         ix86_veclib_handler = ix86_veclibabi_svml;
3674       case ix86_veclibabi_type_acml:
3675         ix86_veclib_handler = ix86_veclibabi_acml;
3682   if ((!USE_IX86_FRAME_POINTER
3683        || (x86_accumulate_outgoing_args & ix86_tune_mask))
3684       && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3686     target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3688   /* ??? Unwind info is not correct around the CFG unless either a frame
3689      pointer is present or M_A_O_A is set.  Fixing this requires rewriting
3690      unwind info generation to be aware of the CFG and propagating states
     around edges.  */
3692   if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3693        || flag_exceptions || flag_non_call_exceptions)
3694       && flag_omit_frame_pointer
3695       && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3697       if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3698         warning (0, "unwind tables currently require either a frame pointer "
3699                  "or %saccumulate-outgoing-args%s for correctness", prefix, suffix);
3701       target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3704   /* If stack probes are required, the space used for large function
3705      arguments on the stack must also be probed, so enable
3706      -maccumulate-outgoing-args so this happens in the prologue.  */
3707   if (TARGET_STACK_PROBE
3708       && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3710       if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3711         warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3712                  "for correctness", prefix, suffix);
3713       target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3716   /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix.  */
3719     ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3720     p = strchr (internal_label_prefix, 'X');
3721     internal_label_prefix_len = p - internal_label_prefix;
3725   /* When scheduling description is not available, disable scheduler pass
3726      so it won't slow down the compilation and make x87 code slower.  */
3727   if (!TARGET_SCHEDULE)
3728     flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3730   maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3731                          ix86_tune_cost->simultaneous_prefetches,
3732                          global_options.x_param_values,
3733                          global_options_set.x_param_values);
3734   maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3735                          ix86_tune_cost->prefetch_block,
3736                          global_options.x_param_values,
3737                          global_options_set.x_param_values);
3738   maybe_set_param_value (PARAM_L1_CACHE_SIZE,
3739                          ix86_tune_cost->l1_cache_size,
3740                          global_options.x_param_values,
3741                          global_options_set.x_param_values);
3742   maybe_set_param_value (PARAM_L2_CACHE_SIZE,
3743                          ix86_tune_cost->l2_cache_size,
3744                          global_options.x_param_values,
3745                          global_options_set.x_param_values);
3747   /* Enable software prefetching at -O3 for CPUs where prefetching is helpful.  */
3748   if (flag_prefetch_loop_arrays < 0
3750       && (optimize >= 3 || flag_profile_use)
3751       && TARGET_SOFTWARE_PREFETCHING_BENEFICIAL)
3752     flag_prefetch_loop_arrays = 1;
3754   /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3755      can be optimized to ap = __builtin_next_arg (0).  */
3756   if (!TARGET_64BIT && !flag_split_stack)
3757     targetm.expand_builtin_va_start = NULL;
3761       ix86_gen_leave = gen_leave_rex64;
3762       if (Pmode == DImode)
3764           ix86_gen_monitor = gen_sse3_monitor64_di;
3765           ix86_gen_tls_global_dynamic_64 = gen_tls_global_dynamic_64_di;
3766           ix86_gen_tls_local_dynamic_base_64
3767             = gen_tls_local_dynamic_base_64_di;
3771           ix86_gen_monitor = gen_sse3_monitor64_si;
3772           ix86_gen_tls_global_dynamic_64 = gen_tls_global_dynamic_64_si;
3773           ix86_gen_tls_local_dynamic_base_64
3774             = gen_tls_local_dynamic_base_64_si;
3779       ix86_gen_leave = gen_leave;
3780       ix86_gen_monitor = gen_sse3_monitor;
3783   if (Pmode == DImode)
3785       ix86_gen_add3 = gen_adddi3;
3786       ix86_gen_sub3 = gen_subdi3;
3787       ix86_gen_sub3_carry = gen_subdi3_carry;
3788       ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3789       ix86_gen_andsp = gen_anddi3;
3790       ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
3791       ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
3792       ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
3796       ix86_gen_add3 = gen_addsi3;
3797       ix86_gen_sub3 = gen_subsi3;
3798       ix86_gen_sub3_carry = gen_subsi3_carry;
3799       ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3800       ix86_gen_andsp = gen_andsi3;
3801       ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
3802       ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
3803       ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
3807   /* Use -mcld by default for 32-bit code if configured with --enable-cld.  */
3809     target_flags |= MASK_CLD & ~target_flags_explicit;
3812   if (!TARGET_64BIT && flag_pic)
3814       if (flag_fentry > 0)
3815         sorry ("-mfentry isn%'t supported for 32-bit in combination " "with -fpic");
3819   else if (TARGET_SEH)
3821       if (flag_fentry == 0)
3822         sorry ("-mno-fentry isn%'t compatible with SEH");
3825   else if (flag_fentry < 0)
3827 #if defined(PROFILE_BEFORE_PROLOGUE)
3836   /* When not optimizing for size, enable vzeroupper optimization for
3837      TARGET_AVX with -fexpensive-optimizations and split 32-byte
3838      AVX unaligned load/store.  */
3841       if (flag_expensive_optimizations
3842           && !(target_flags_explicit & MASK_VZEROUPPER))
3843         target_flags |= MASK_VZEROUPPER;
3844       if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
3845           && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
3846         target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
3847       if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
3848           && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
3849         target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
3850       /* Enable 128-bit AVX instruction generation
3851          for the auto-vectorizer.  */
3852       if (TARGET_AVX128_OPTIMAL
3853           && !(target_flags_explicit & MASK_PREFER_AVX128))
3854         target_flags |= MASK_PREFER_AVX128;
3859       /* Disable vzeroupper pass if TARGET_AVX is disabled.  */
3860       target_flags &= ~MASK_VZEROUPPER;
3863   if (ix86_recip_name)
3865       char *p = ASTRDUP (ix86_recip_name);
3867       unsigned int mask, i;
3870       while ((q = strtok (p, ",")) != NULL)
3881           if (!strcmp (q, "default"))
3882             mask = RECIP_MASK_ALL;
3885               for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3886                 if (!strcmp (q, recip_options[i].string))
3888                     mask = recip_options[i].mask;
3892               if (i == ARRAY_SIZE (recip_options))
3894                   error ("unknown option for -mrecip=%s", q);
3896                   mask = RECIP_MASK_NONE;
3900             recip_mask_explicit |= mask;
3902             recip_mask &= ~mask;
3909     recip_mask |= RECIP_MASK_ALL & ~recip_mask_explicit;
3910   else if (target_flags_explicit & MASK_RECIP)
3911     recip_mask &= ~(RECIP_MASK_ALL & ~recip_mask_explicit);
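/* [Editor's sketch -- hypothetical standalone model, standard C only; the
   mask values below are illustrative, not GCC's.]  The -mrecip= handling
   above splits a comma-separated list with strtok, strips an optional
   "no-" prefix, looks the name up in recip_options, and sets or clears
   the corresponding bits:  */
#if 0
#include <string.h>

static unsigned int
parse_recip_list (char *list)
{
  unsigned int mask = 0;
  char *q;
  for (q = strtok (list, ","); q != NULL; q = strtok (NULL, ","))
    {
      int invert = strncmp (q, "no-", 3) == 0;
      const char *name = invert ? q + 3 : q;
      unsigned int bit =
        !strcmp (name, "div")  ? 1u << 0 :
        !strcmp (name, "sqrt") ? 1u << 1 :
        !strcmp (name, "all")  ? ~0u : 0u;
      if (invert)
        mask &= ~bit;
      else
        mask |= bit;
    }
  return mask;
}
#endif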
3913   /* Default long double to 64-bit for Bionic.  */
3914   if (TARGET_HAS_BIONIC
3915       && !(target_flags_explicit & MASK_LONG_DOUBLE_64))
3916     target_flags |= MASK_LONG_DOUBLE_64;
3918   /* Save the initial options in case the user does function specific options.  */
3921   target_option_default_node = target_option_current_node
3922     = build_target_option_node ();
3925 /* Implement the TARGET_OPTION_OVERRIDE hook.  */
3928 ix86_option_override (void)
3930   static struct register_pass_info insert_vzeroupper_info
3931     = { &pass_insert_vzeroupper.pass, "reload",
3932         1, PASS_POS_INSERT_AFTER
3935   ix86_option_override_internal (true);
3938   /* This needs to be done at start up.  It's convenient to do it here.  */
3939   register_pass (&insert_vzeroupper_info);
3942 /* Update register usage after having seen the compiler flags.  */
3945 ix86_conditional_register_usage (void)
3950   /* The PIC register, if it exists, is fixed.  */
3951   j = PIC_OFFSET_TABLE_REGNUM;
3952   if (j != INVALID_REGNUM)
3953     fixed_regs[j] = call_used_regs[j] = 1;
3955   /* For 32-bit targets, squash the REX registers.  */
3958       for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3959         fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3960       for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3961         fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3964   /* See the definition of CALL_USED_REGISTERS in i386.h.  */
3965   c_mask = (TARGET_64BIT_MS_ABI ? (1 << 3)
3966             : TARGET_64BIT ? (1 << 2)
3969   CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3971   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3973       /* Set/reset conditionally defined registers from
3974          CALL_USED_REGISTERS initializer.  */
3975       if (call_used_regs[i] > 1)
3976         call_used_regs[i] = !!(call_used_regs[i] & c_mask);
3978       /* Calculate registers of CLOBBERED_REGS register set
3979          as call used registers from GENERAL_REGS register set.  */
3980       if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3981           && call_used_regs[i])
3982         SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
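/* [Editor's illustration -- not part of the original source.]  Initializer
   values greater than 1 in CALL_USED_REGISTERS are per-ABI bitmasks.  With
   c_mask == (1 << 2) for 64-bit SysV, an entry of 6 (binary 110) yields
   call_used_regs[i] = !!(6 & 4) = 1, whereas under the 64-bit MS ABI
   (c_mask == (1 << 3)) the same entry yields !!(6 & 8) = 0.  */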
3985   /* If MMX is disabled, squash the registers.  */
3987     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3988       if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3989         fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3991   /* If SSE is disabled, squash the registers.  */
3993     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3994       if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3995         fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3997   /* If the FPU is disabled, squash the registers.  */
3998   if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3999     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4000       if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
4001         fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4005 /* Save the current options */
4008 ix86_function_specific_save (struct cl_target_option *ptr)
4010   ptr->arch = ix86_arch;
4011   ptr->schedule = ix86_schedule;
4012   ptr->tune = ix86_tune;
4013   ptr->branch_cost = ix86_branch_cost;
4014   ptr->tune_defaulted = ix86_tune_defaulted;
4015   ptr->arch_specified = ix86_arch_specified;
4016   ptr->x_ix86_isa_flags_explicit = ix86_isa_flags_explicit;
4017   ptr->ix86_target_flags_explicit = target_flags_explicit;
4018   ptr->x_recip_mask_explicit = recip_mask_explicit;
4020   /* The fields are char but the variables are not; make sure the
4021      values fit in the fields.  */
4022   gcc_assert (ptr->arch == ix86_arch);
4023   gcc_assert (ptr->schedule == ix86_schedule);
4024   gcc_assert (ptr->tune == ix86_tune);
4025   gcc_assert (ptr->branch_cost == ix86_branch_cost);
4028 /* Restore the current options */
4031 ix86_function_specific_restore (struct cl_target_option *ptr)
4033   enum processor_type old_tune = ix86_tune;
4034   enum processor_type old_arch = ix86_arch;
4035   unsigned int ix86_arch_mask, ix86_tune_mask;
4038   ix86_arch = (enum processor_type) ptr->arch;
4039   ix86_schedule = (enum attr_cpu) ptr->schedule;
4040   ix86_tune = (enum processor_type) ptr->tune;
4041   ix86_branch_cost = ptr->branch_cost;
4042   ix86_tune_defaulted = ptr->tune_defaulted;
4043   ix86_arch_specified = ptr->arch_specified;
4044   ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
4045   target_flags_explicit = ptr->ix86_target_flags_explicit;
4046   recip_mask_explicit = ptr->x_recip_mask_explicit;
4048   /* Recreate the arch feature tests if the arch changed */
4049   if (old_arch != ix86_arch)
4051       ix86_arch_mask = 1u << ix86_arch;
4052       for (i = 0; i < X86_ARCH_LAST; ++i)
4053         ix86_arch_features[i]
4054           = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
4057   /* Recreate the tune optimization tests */
4058   if (old_tune != ix86_tune)
4060       ix86_tune_mask = 1u << ix86_tune;
4061       for (i = 0; i < X86_TUNE_LAST; ++i)
4062         ix86_tune_features[i]
4063           = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
4067 /* Print the current options */
4070 ix86_function_specific_print (FILE *file, int indent,
4071                               struct cl_target_option *ptr)
4074     = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
4075                           NULL, NULL, ptr->x_ix86_fpmath, false);
4077   fprintf (file, "%*sarch = %d (%s)\n",
4080            ((ptr->arch < TARGET_CPU_DEFAULT_max)
4081             ? cpu_names[ptr->arch]
4084   fprintf (file, "%*stune = %d (%s)\n",
4087            ((ptr->tune < TARGET_CPU_DEFAULT_max)
4088             ? cpu_names[ptr->tune]
4091   fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4095   fprintf (file, "%*s%s\n", indent, "", target_string);
4096   free (target_string);
4101 /* Inner function to process the attribute((target(...))), take an argument and
4102    set the current options from the argument.  If we have a list, recursively go
   over the strings.  */
4106 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[],
4107                                      struct gcc_options *enum_opts_set)
4112 #define IX86_ATTR_ISA(S,O)   { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4113 #define IX86_ATTR_STR(S,O)   { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4114 #define IX86_ATTR_ENUM(S,O)  { S, sizeof (S)-1, ix86_opt_enum, O, 0 }
4115 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4116 #define IX86_ATTR_NO(S,O,M)  { S, sizeof (S)-1, ix86_opt_no,  O, M }
4132     enum ix86_opt_type type;
4137     IX86_ATTR_ISA ("3dnow",    OPT_m3dnow),
4138     IX86_ATTR_ISA ("abm",      OPT_mabm),
4139     IX86_ATTR_ISA ("bmi",      OPT_mbmi),
4140     IX86_ATTR_ISA ("bmi2",     OPT_mbmi2),
4141     IX86_ATTR_ISA ("lzcnt",    OPT_mlzcnt),
4142     IX86_ATTR_ISA ("tbm",      OPT_mtbm),
4143     IX86_ATTR_ISA ("aes",      OPT_maes),
4144     IX86_ATTR_ISA ("avx",      OPT_mavx),
4145     IX86_ATTR_ISA ("avx2",     OPT_mavx2),
4146     IX86_ATTR_ISA ("mmx",      OPT_mmmx),
4147     IX86_ATTR_ISA ("pclmul",   OPT_mpclmul),
4148     IX86_ATTR_ISA ("popcnt",   OPT_mpopcnt),
4149     IX86_ATTR_ISA ("sse",      OPT_msse),
4150     IX86_ATTR_ISA ("sse2",     OPT_msse2),
4151     IX86_ATTR_ISA ("sse3",     OPT_msse3),
4152     IX86_ATTR_ISA ("sse4",     OPT_msse4),
4153     IX86_ATTR_ISA ("sse4.1",   OPT_msse4_1),
4154     IX86_ATTR_ISA ("sse4.2",   OPT_msse4_2),
4155     IX86_ATTR_ISA ("sse4a",    OPT_msse4a),
4156     IX86_ATTR_ISA ("ssse3",    OPT_mssse3),
4157     IX86_ATTR_ISA ("fma4",     OPT_mfma4),
4158     IX86_ATTR_ISA ("fma",      OPT_mfma),
4159     IX86_ATTR_ISA ("xop",      OPT_mxop),
4160     IX86_ATTR_ISA ("lwp",      OPT_mlwp),
4161     IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4162     IX86_ATTR_ISA ("rdrnd",    OPT_mrdrnd),
4163     IX86_ATTR_ISA ("f16c",     OPT_mf16c),
4164     IX86_ATTR_ISA ("rtm",      OPT_mrtm),
4165     IX86_ATTR_ISA ("hle",      OPT_mhle),
4166     IX86_ATTR_ISA ("prfchw",   OPT_mprfchw),
4167     IX86_ATTR_ISA ("rdseed",   OPT_mrdseed),
4168     IX86_ATTR_ISA ("adx",      OPT_madx),
4169     IX86_ATTR_ISA ("fxsr",     OPT_mfxsr),
4170     IX86_ATTR_ISA ("xsave",    OPT_mxsave),
4171     IX86_ATTR_ISA ("xsaveopt", OPT_mxsaveopt),
4174     IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
4176     /* string options */
4177     IX86_ATTR_STR ("arch=",    IX86_FUNCTION_SPECIFIC_ARCH),
4178     IX86_ATTR_STR ("tune=",    IX86_FUNCTION_SPECIFIC_TUNE),
4181     IX86_ATTR_YES ("cld",
4185     IX86_ATTR_NO ("fancy-math-387",
4186                   OPT_mfancy_math_387,
4187                   MASK_NO_FANCY_MATH_387),
4189     IX86_ATTR_YES ("ieee-fp",
4193     IX86_ATTR_YES ("inline-all-stringops",
4194                    OPT_minline_all_stringops,
4195                    MASK_INLINE_ALL_STRINGOPS),
4197     IX86_ATTR_YES ("inline-stringops-dynamically",
4198                    OPT_minline_stringops_dynamically,
4199                    MASK_INLINE_STRINGOPS_DYNAMICALLY),
4201     IX86_ATTR_NO ("align-stringops",
4202                   OPT_mno_align_stringops,
4203                   MASK_NO_ALIGN_STRINGOPS),
4205     IX86_ATTR_YES ("recip",
4211   /* If this is a list, recurse to get the options.  */
4212   if (TREE_CODE (args) == TREE_LIST)
4216       for (; args; args = TREE_CHAIN (args))
4217         if (TREE_VALUE (args)
4218             && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args),
4219                                                      p_strings, enum_opts_set))
4225   else if (TREE_CODE (args) != STRING_CST)
4228   /* Handle multiple arguments separated by commas.  */
4229   next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4231   while (next_optstr && *next_optstr != '\0')
4233       char *p = next_optstr;
4235       char *comma = strchr (next_optstr, ',');
4236       const char *opt_string;
4237       size_t len, opt_len;
4242       enum ix86_opt_type type = ix86_opt_unknown;
4248           len = comma - next_optstr;
4249           next_optstr = comma + 1;
4257       /* Recognize no-xxx.  */
4258       if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4267       /* Find the option.  */
4270       for (i = 0; i < ARRAY_SIZE (attrs); i++)
4272           type = attrs[i].type;
4273           opt_len = attrs[i].len;
4274           if (ch == attrs[i].string[0]
4275               && ((type != ix86_opt_str && type != ix86_opt_enum)
4278               && memcmp (p, attrs[i].string, opt_len) == 0)
4281               mask = attrs[i].mask;
4282               opt_string = attrs[i].string;
4287       /* Process the option.  */
4290           error ("attribute(target(\"%s\")) is unknown", orig_p);
4294       else if (type == ix86_opt_isa)
4296           struct cl_decoded_option decoded;
4298           generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
4299           ix86_handle_option (&global_options, &global_options_set,
4300                               &decoded, input_location);
4303       else if (type == ix86_opt_yes || type == ix86_opt_no)
4305           if (type == ix86_opt_no)
4306             opt_set_p = !opt_set_p;
4309             target_flags |= mask;
4311             target_flags &= ~mask;
4314       else if (type == ix86_opt_str)
4318               error ("option(\"%s\") was already specified", opt_string);
4322             p_strings[opt] = xstrdup (p + opt_len);
4325       else if (type == ix86_opt_enum)
4330           arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
4332             set_option (&global_options, enum_opts_set, opt, value,
4333                         p + opt_len, DK_UNSPECIFIED, input_location,
4337               error ("attribute(target(\"%s\")) is unknown", orig_p);
4349 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL.  */
4352 ix86_valid_target_attribute_tree (tree args)
4354   const char *orig_arch_string = ix86_arch_string;
4355   const char *orig_tune_string = ix86_tune_string;
4356   enum fpmath_unit orig_fpmath_set = global_options_set.x_ix86_fpmath;
4357   int orig_tune_defaulted = ix86_tune_defaulted;
4358   int orig_arch_specified = ix86_arch_specified;
4359   char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL };
4362   struct cl_target_option *def
4363     = TREE_TARGET_OPTION (target_option_default_node);
4364   struct gcc_options enum_opts_set;
4366   memset (&enum_opts_set, 0, sizeof (enum_opts_set));
4368   /* Process each of the options on the chain.  */
4369   if (! ix86_valid_target_attribute_inner_p (args, option_strings,
4373   /* If the changed options are different from the default, rerun
4374      ix86_option_override_internal, and then save the options away.
4375      The string options are attribute options, and will be undone
4376      when we copy the save structure.  */
4377   if (ix86_isa_flags != def->x_ix86_isa_flags
4378       || target_flags != def->x_target_flags
4379       || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4380       || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4381       || enum_opts_set.x_ix86_fpmath)
4383       /* If we are using the default tune= or arch=, undo the string assigned,
4384          and use the default.  */
4385       if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4386         ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4387       else if (!orig_arch_specified)
4388         ix86_arch_string = NULL;
4390       if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4391         ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4392       else if (orig_tune_defaulted)
4393         ix86_tune_string = NULL;
4395       /* If fpmath= is not set, and we now have sse2 on 32-bit, use it.  */
4396       if (enum_opts_set.x_ix86_fpmath)
4397         global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
4398       else if (!TARGET_64BIT && TARGET_SSE)
4400           ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
4401           global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
4404       /* Do any overrides, such as arch=xxx, or tune=xxx support.  */
4405       ix86_option_override_internal (false);
4407       /* Add any builtin functions with the new isa if any.  */
4408       ix86_add_new_builtins (ix86_isa_flags);
4410       /* Save the current options unless we are validating options for #pragma.  */
4412       t = build_target_option_node ();
4414       ix86_arch_string = orig_arch_string;
4415       ix86_tune_string = orig_tune_string;
4416       global_options_set.x_ix86_fpmath = orig_fpmath_set;
4418       /* Free up memory allocated to hold the strings */
4419       for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4420         free (option_strings[i]);
4426 /* Hook to validate attribute((target("string"))).  */
4429 ix86_valid_target_attribute_p (tree fndecl,
4430                                tree ARG_UNUSED (name),
4432                                int ARG_UNUSED (flags))
4434   struct cl_target_option cur_target;
4436   tree old_optimize = build_optimization_node ();
4437   tree new_target, new_optimize;
4438   tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4440   /* If the function changed the optimization levels as well as setting target
4441      options, start with the optimizations specified.  */
4442   if (func_optimize && func_optimize != old_optimize)
4443     cl_optimization_restore (&global_options,
4444                              TREE_OPTIMIZATION (func_optimize));
4446   /* The target attributes may also change some optimization flags, so update
4447      the optimization options if necessary.  */
4448   cl_target_option_save (&cur_target, &global_options);
4449   new_target = ix86_valid_target_attribute_tree (args);
4450   new_optimize = build_optimization_node ();
4457       DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4459       if (old_optimize != new_optimize)
4460         DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4463   cl_target_option_restore (&global_options, &cur_target);
4465   if (old_optimize != new_optimize)
4466     cl_optimization_restore (&global_options,
4467                              TREE_OPTIMIZATION (old_optimize));
4473 /* Hook to determine if one function can safely inline another.  */
4476 ix86_can_inline_p (tree caller, tree callee)
4479   tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4480   tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4482   /* If callee has no option attributes, then it is ok to inline.  */
4486   /* If caller has no option attributes, but callee does then it is not ok to inline.  */
4488   else if (!caller_tree)
4493       struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4494       struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4496       /* Callee's isa options should be a subset of the caller's, i.e. a SSE4 function
4497          can inline a SSE2 function but a SSE2 function can't inline a SSE4 function.  */
4499       if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4500           != callee_opts->x_ix86_isa_flags)
4503       /* See if we have the same non-isa options.  */
4504       else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4507       /* See if arch, tune, etc. are the same.  */
4508       else if (caller_opts->arch != callee_opts->arch)
4511       else if (caller_opts->tune != callee_opts->tune)
4514       else if (caller_opts->x_ix86_fpmath != callee_opts->x_ix86_fpmath)
4517       else if (caller_opts->branch_cost != callee_opts->branch_cost)
4528 /* Remember the last target of ix86_set_current_function.  */
4529 static GTY(()) tree ix86_previous_fndecl;
4531 /* Establish appropriate back-end context for processing the function
4532    FNDECL.  The argument might be NULL to indicate processing at top
4533    level, outside of any function scope.  */
4535 ix86_set_current_function (tree fndecl)
4537   /* Only change the context if the function changes.  This hook is called
4538      several times in the course of compiling a function, and we don't want to
4539      slow things down too much or call target_reinit when it isn't safe.  */
4540   if (fndecl && fndecl != ix86_previous_fndecl)
4542       tree old_tree = (ix86_previous_fndecl
4543                        ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4546       tree new_tree = (fndecl
4547                        ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4550       ix86_previous_fndecl = fndecl;
4551       if (old_tree == new_tree)
4556           cl_target_option_restore (&global_options,
4557                                     TREE_TARGET_OPTION (new_tree));
4563           struct cl_target_option *def
4564             = TREE_TARGET_OPTION (target_option_current_node);
4566           cl_target_option_restore (&global_options, def);
4573 /* Return true if this goes in large data/bss.  */
4576 ix86_in_large_data_p (tree exp)
4578   if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4581   /* Functions are never large data.  */
4582   if (TREE_CODE (exp) == FUNCTION_DECL)
4585   if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4587       const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4588       if (strcmp (section, ".ldata") == 0
4589           || strcmp (section, ".lbss") == 0)
4595       HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4597       /* If this is an incomplete type with size 0, then we can't put it
4598          in data because it might be too big when completed.  */
4599       if (!size || size > ix86_section_threshold)
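/* [Editor's note -- example, not from the source.]  Under -mcmodel=medium,
   objects larger than ix86_section_threshold bytes (set by
   -mlarge-data-threshold=, 65536 by default) count as large data and are
   routed to the .ldata/.lbss family of sections handled below.  */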
4606 /* Switch to the appropriate section for output of DECL.
4607    DECL is either a `VAR_DECL' node or a constant of some sort.
4608    RELOC indicates whether forming the initial value of DECL requires
4609    link-time relocations.  */
4611 static section *x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4615 x86_64_elf_select_section (tree decl, int reloc,
4616                            unsigned HOST_WIDE_INT align)
4618   if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4619       && ix86_in_large_data_p (decl))
4621       const char *sname = NULL;
4622       unsigned int flags = SECTION_WRITE;
4623       switch (categorize_decl_for_section (decl, reloc))
4628         case SECCAT_DATA_REL:
4629           sname = ".ldata.rel";
4631         case SECCAT_DATA_REL_LOCAL:
4632           sname = ".ldata.rel.local";
4634         case SECCAT_DATA_REL_RO:
4635           sname = ".ldata.rel.ro";
4637         case SECCAT_DATA_REL_RO_LOCAL:
4638           sname = ".ldata.rel.ro.local";
4642           flags |= SECTION_BSS;
4645         case SECCAT_RODATA_MERGE_STR:
4646         case SECCAT_RODATA_MERGE_STR_INIT:
4647         case SECCAT_RODATA_MERGE_CONST:
4651         case SECCAT_SRODATA:
4658           /* We don't split these for medium model.  Place them into
4659              default sections and hope for the best.  */
4664       /* We might get called with string constants, but get_named_section
4665          doesn't like them as they are not DECLs.  Also, we need to set
4666          flags in that case.  */
4668         return get_section (sname, flags, NULL);
4669       return get_named_section (decl, sname, reloc);
4672   return default_elf_select_section (decl, reloc, align);
4675 /* Build up a unique section name, expressed as a
4676    STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4677    RELOC indicates whether the initial value of EXP requires
4678    link-time relocations.  */
4680 static void ATTRIBUTE_UNUSED
4681 x86_64_elf_unique_section (tree decl, int reloc)
4683   if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4684       && ix86_in_large_data_p (decl))
4686       const char *prefix = NULL;
4687       /* We only need to use .gnu.linkonce if we don't have COMDAT groups.  */
4688       bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4690       switch (categorize_decl_for_section (decl, reloc))
4693         case SECCAT_DATA_REL:
4694         case SECCAT_DATA_REL_LOCAL:
4695         case SECCAT_DATA_REL_RO:
4696         case SECCAT_DATA_REL_RO_LOCAL:
4697           prefix = one_only ? ".ld" : ".ldata";
4700           prefix = one_only ? ".lb" : ".lbss";
4703         case SECCAT_RODATA_MERGE_STR:
4704         case SECCAT_RODATA_MERGE_STR_INIT:
4705         case SECCAT_RODATA_MERGE_CONST:
4706           prefix = one_only ? ".lr" : ".lrodata";
4708         case SECCAT_SRODATA:
4715           /* We don't split these for medium model.  Place them into
4716              default sections and hope for the best.  */
4721           const char *name, *linkonce;
4724           name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4725           name = targetm.strip_name_encoding (name);
4727           /* If we're using one_only, then there needs to be a .gnu.linkonce
4728              prefix to the section name.  */
4729           linkonce = one_only ? ".gnu.linkonce" : "";
4731           string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4733           DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4737   default_unique_section (decl, reloc);
4740 #ifdef COMMON_ASM_OP
4741 /* This says how to output assembler code to declare an
4742    uninitialized external linkage data object.
4744    For medium model x86-64 we need to use the .largecomm opcode for large objects.  */
4747 x86_elf_aligned_common (FILE *file,
4748                         const char *name, unsigned HOST_WIDE_INT size,
4751   if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4752       && size > (unsigned int)ix86_section_threshold)
4753     fputs (".largecomm\t", file);
4755     fputs (COMMON_ASM_OP, file);
4756   assemble_name (file, name);
4757   fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4758            size, align / BITS_PER_UNIT);
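/* [Editor's example -- illustrative output, not from the source.]  Under
   -mcmodel=medium, a 128 KiB common object "buf" with 256-bit (32-byte)
   alignment would be emitted as
       .largecomm	buf,131072,32
   while smaller objects fall back to the regular COMMON_ASM_OP form.  */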
4762 /* Utility function for targets to use in implementing
4763    ASM_OUTPUT_ALIGNED_BSS.  */
4766 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4767                         const char *name, unsigned HOST_WIDE_INT size,
4770   if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4771       && size > (unsigned int)ix86_section_threshold)
4772     switch_to_section (get_named_section (decl, ".lbss", 0));
4774     switch_to_section (bss_section);
4775   ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4776 #ifdef ASM_DECLARE_OBJECT_NAME
4777   last_assemble_variable_decl = decl;
4778   ASM_DECLARE_OBJECT_NAME (file, name, decl);
4780   /* The standard thing is to just output a label for the object.  */
4781   ASM_OUTPUT_LABEL (file, name);
4782 #endif /* ASM_DECLARE_OBJECT_NAME */
4783   ASM_OUTPUT_SKIP (file, size ? size : 1);
4786 /* Decide whether we must probe the stack before any space allocation
4787    on this target.  It's essentially TARGET_STACK_PROBE except when
4788    -fstack-check causes the stack to be already probed differently.  */
4791 ix86_target_stack_probe (void)
4793   /* Do not probe the stack twice if static stack checking is enabled.  */
4794   if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
4797   return TARGET_STACK_PROBE;
4800 /* Decide whether we can make a sibling call to a function.  DECL is the
4801    declaration of the function being targeted by the call and EXP is the
4802    CALL_EXPR representing the call.  */
4805 ix86_function_ok_for_sibcall (tree decl, tree exp)
4807   tree type, decl_or_type;
4810   /* If we are generating position-independent code, we cannot sibcall
4811      optimize any indirect call, or a direct call to a global function,
4812      as the PLT requires %ebx be live. (Darwin does not have a PLT.)  */
4816       && (!decl || !targetm.binds_local_p (decl)))
4819   /* If we need to align the outgoing stack, then sibcalling would
4820      unalign the stack, which may break the called function.  */
4821   if (ix86_minimum_incoming_stack_boundary (true)
4822       < PREFERRED_STACK_BOUNDARY)
4827       decl_or_type = decl;
4828       type = TREE_TYPE (decl);
4832       /* We're looking at the CALL_EXPR, we need the type of the function.  */
4833       type = CALL_EXPR_FN (exp);		/* pointer expression */
4834       type = TREE_TYPE (type);			/* pointer type */
4835       type = TREE_TYPE (type);			/* function type */
4836       decl_or_type = type;
4839   /* Check that the return value locations are the same.  For instance, if
4840      we are returning floats on the 80387 register stack, we cannot
4841      make a sibcall from a function that doesn't return a float to a
4842      function that does or, conversely, from a function that does return
4843      a float to a function that doesn't; the necessary stack adjustment
4844      would not be executed.  This is also the place we notice
4845      differences in the return value ABI.  Note that it is ok for one
4846      of the functions to have void return type as long as the return
4847      value of the other is passed in a register.  */
4848   a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4849   b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4851   if (STACK_REG_P (a) || STACK_REG_P (b))
4853       if (!rtx_equal_p (a, b))
4856   else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4858   else if (!rtx_equal_p (a, b))
4863   /* The SYSV ABI has more call-clobbered registers;
4864      disallow sibcalls from MS to SYSV.  */
4865   if (cfun->machine->call_abi == MS_ABI
4866       && ix86_function_type_abi (type) == SYSV_ABI)
4871       /* If this call is indirect, we'll need to be able to use a
4872          call-clobbered register for the address of the target function.
4873          Make sure that all such registers are not used for passing
4874          parameters.  Note that DLLIMPORT functions are indirect.  */
4876           || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4878           if (ix86_function_regparm (type, NULL) >= 3)
4880               /* ??? Need to count the actual number of registers to be used,
4881                  not the possible number of registers.  Fix later.  */
4887   /* Otherwise okay.  That also includes certain types of indirect calls.  */
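/* [Editor's illustration -- not part of the original source.]  The
   return-value check above rejects, e.g., a 32-bit sibcall from a
   function returning double (value left in the x87 stack register
   %st(0)) to one returning int (%eax): the x87 stack pop that the
   caller's return sequence would normally perform is skipped when the
   call is turned into a jump.  */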
4891 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4892 and "sseregparm" calling convention attributes;
4893 arguments as in struct attribute_spec.handler. */
4896 ix86_handle_cconv_attribute (tree
*node
, tree name
,
4898 int flags ATTRIBUTE_UNUSED
,
4901 if (TREE_CODE (*node
) != FUNCTION_TYPE
4902 && TREE_CODE (*node
) != METHOD_TYPE
4903 && TREE_CODE (*node
) != FIELD_DECL
4904 && TREE_CODE (*node
) != TYPE_DECL
)
4906 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
4908 *no_add_attrs
= true;
4912 /* Can combine regparm with all attributes but fastcall, and thiscall. */
4913 if (is_attribute_p ("regparm", name
))
4917 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node
)))
4919 error ("fastcall and regparm attributes are not compatible");
4922 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node
)))
4924 error ("regparam and thiscall attributes are not compatible");
4927 cst
= TREE_VALUE (args
);
4928 if (TREE_CODE (cst
) != INTEGER_CST
)
4930 warning (OPT_Wattributes
,
4931 "%qE attribute requires an integer constant argument",
4933 *no_add_attrs
= true;
4935 else if (compare_tree_int (cst
, REGPARM_MAX
) > 0)
4937 warning (OPT_Wattributes
, "argument to %qE attribute larger than %d",
4939 *no_add_attrs
= true;
4947 /* Do not warn when emulating the MS ABI. */
4948 if ((TREE_CODE (*node
) != FUNCTION_TYPE
4949 && TREE_CODE (*node
) != METHOD_TYPE
)
4950 || ix86_function_type_abi (*node
) != MS_ABI
)
4951 warning (OPT_Wattributes
, "%qE attribute ignored",
4953 *no_add_attrs
= true;
4957 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4958 if (is_attribute_p ("fastcall", name
))
4960 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node
)))
4962 error ("fastcall and cdecl attributes are not compatible");
4964 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node
)))
4966 error ("fastcall and stdcall attributes are not compatible");
4968 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node
)))
4970 error ("fastcall and regparm attributes are not compatible");
4972 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node
)))
4974 error ("fastcall and thiscall attributes are not compatible");
4978 /* Can combine stdcall with fastcall (redundant), regparm and
4980 else if (is_attribute_p ("stdcall", name
))
4982 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node
)))
4984 error ("stdcall and cdecl attributes are not compatible");
4986 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node
)))
4988 error ("stdcall and fastcall attributes are not compatible");
4990 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node
)))
4992 error ("stdcall and thiscall attributes are not compatible");
4996 /* Can combine cdecl with regparm and sseregparm. */
4997 else if (is_attribute_p ("cdecl", name
))
4999 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node
)))
5001 error ("stdcall and cdecl attributes are not compatible");
5003 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node
)))
5005 error ("fastcall and cdecl attributes are not compatible");
5007 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node
)))
5009 error ("cdecl and thiscall attributes are not compatible");
5012 else if (is_attribute_p ("thiscall", name
))
5014 if (TREE_CODE (*node
) != METHOD_TYPE
&& pedantic
)
5015 warning (OPT_Wattributes
, "%qE attribute is used for none class-method",
5017 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node
)))
5019 error ("stdcall and thiscall attributes are not compatible");
5021 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node
)))
5023 error ("fastcall and thiscall attributes are not compatible");
5025 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node
)))
5027 error ("cdecl and thiscall attributes are not compatible");
5031 /* Can combine sseregparm with all attributes. */
5036 /* The transactional memory builtins are implicitly regparm or fastcall
5037 depending on the ABI. Override the generic do-nothing attribute that
5038 these builtins were declared with, and replace it with one of the two
5039 attributes that we expect elsewhere. */
5042 ix86_handle_tm_regparm_attribute (tree
*node
, tree name ATTRIBUTE_UNUSED
,
5043 tree args ATTRIBUTE_UNUSED
,
5044 int flags ATTRIBUTE_UNUSED
,
5049 /* In no case do we want to add the placeholder attribute. */
5050 *no_add_attrs
= true;
5052 /* The 64-bit ABI is unchanged for transactional memory. */
5056 /* ??? Is there a better way to validate 32-bit windows? We have
5057 cfun->machine->call_abi, but that seems to be set only for 64-bit. */
5058 if (CHECK_STACK_LIMIT
> 0)
5059 alt
= tree_cons (get_identifier ("fastcall"), NULL
, NULL
);
5062 alt
= tree_cons (NULL
, build_int_cst (NULL
, 2), NULL
);
5063 alt
= tree_cons (get_identifier ("regparm"), alt
, NULL
);
5065 decl_attributes (node
, alt
, flags
);
5070 /* This function determines from TYPE the calling-convention.  */
5073 ix86_get_callcvt (const_tree type)
5075   unsigned int ret = 0;
5080     return IX86_CALLCVT_CDECL;
5082   attrs = TYPE_ATTRIBUTES (type);
5083   if (attrs != NULL_TREE)
5085       if (lookup_attribute ("cdecl", attrs))
5086         ret |= IX86_CALLCVT_CDECL;
5087       else if (lookup_attribute ("stdcall", attrs))
5088         ret |= IX86_CALLCVT_STDCALL;
5089       else if (lookup_attribute ("fastcall", attrs))
5090         ret |= IX86_CALLCVT_FASTCALL;
5091       else if (lookup_attribute ("thiscall", attrs))
5092         ret |= IX86_CALLCVT_THISCALL;
5094       /* Regparm isn't allowed for thiscall and fastcall.  */
5095       if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
5097           if (lookup_attribute ("regparm", attrs))
5098             ret |= IX86_CALLCVT_REGPARM;
5099           if (lookup_attribute ("sseregparm", attrs))
5100             ret |= IX86_CALLCVT_SSEREGPARM;
5103       if (IX86_BASE_CALLCVT (ret) != 0)
5107   is_stdarg = stdarg_p (type);
5108   if (TARGET_RTD && !is_stdarg)
5109     return IX86_CALLCVT_STDCALL | ret;
5113       || TREE_CODE (type) != METHOD_TYPE
5114       || ix86_function_type_abi (type) != MS_ABI)
5115     return IX86_CALLCVT_CDECL | ret;
5117   return IX86_CALLCVT_THISCALL;
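/* [Editor's sketch -- hypothetical use, not part of the original source.]
   Typical consumption of the returned bits:  */
#if 0
static int
callee_cleans_stack_p (const_tree funtype)
{
  unsigned int ccvt = ix86_get_callcvt (funtype);
  /* stdcall, fastcall and thiscall callees pop their own arguments.  */
  return (ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
                  | IX86_CALLCVT_THISCALL)) != 0;
}
#endif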
5120 /* Return 0 if the attributes for two types are incompatible, 1 if they
5121    are compatible, and 2 if they are nearly compatible (which causes a
5122    warning to be generated).  */
5125 ix86_comp_type_attributes (const_tree type1, const_tree type2)
5127   unsigned int ccvt1, ccvt2;
5129   if (TREE_CODE (type1) != FUNCTION_TYPE
5130       && TREE_CODE (type1) != METHOD_TYPE)
5133   ccvt1 = ix86_get_callcvt (type1);
5134   ccvt2 = ix86_get_callcvt (type2);
5137   if (ix86_function_regparm (type1, NULL)
5138       != ix86_function_regparm (type2, NULL))
/* Return the regparm value for a function with the indicated TYPE and DECL.
   DECL may be NULL when calling function indirectly
   or considering a libcall.  */

static int
ix86_function_regparm (const_tree type, const_tree decl)
{
  tree attr;
  int regparm;
  unsigned int ccvt;

  if (TARGET_64BIT)
    return (ix86_function_type_abi (type) == SYSV_ABI
	    ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
  ccvt = ix86_get_callcvt (type);
  regparm = ix86_regparm;

  if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
    {
      attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
      if (attr)
	{
	  regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
	  return regparm;
	}
    }
  else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
    return 2;
  else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
    return 1;

  /* Use register calling convention for local functions when possible.  */
  if (decl
      && TREE_CODE (decl) == FUNCTION_DECL
      && optimize
      && !(profile_flag && !flag_fentry))
    {
      /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified.  */
      struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
      if (i && i->local && i->can_change_signature)
	{
	  int local_regparm, globals = 0, regno;

	  /* Make sure no regparm register is taken by a
	     fixed register variable.  */
	  for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
	    if (fixed_regs[local_regparm])
	      break;

	  /* We don't want to use regparm(3) for nested functions as
	     these use a static chain pointer in the third argument.  */
	  if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
	    local_regparm = 2;

	  /* In 32-bit mode save a register for the split stack.  */
	  if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
	    local_regparm = 2;

	  /* Each fixed register usage increases register pressure,
	     so fewer registers should be used for argument passing.
	     This functionality can be overridden by an explicit
	     regparm value.  */
	  for (regno = AX_REG; regno <= DI_REG; regno++)
	    if (fixed_regs[regno])
	      globals++;

	  local_regparm
	    = globals < local_regparm ? local_regparm - globals : 0;

	  if (local_regparm > regparm)
	    regparm = local_regparm;
	}
    }

  return regparm;
}
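
/* A worked example of the local-function heuristic above (illustrative
   only): compiled with -O2 on ia32, a function such as

       static int add3 (int a, int b, int c) { return a + b + c; }

   whose address never escapes is a candidate for an implicit regparm(3),
   i.e. A, B and C arrive in %eax, %edx and %ecx.  A fixed register
   variable in one of those registers, a static chain, or -fsplit-stack
   lowers the count as coded above.  */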
/* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
   DFmode (2) arguments in SSE registers for a function with the
   indicated TYPE and DECL.  DECL may be NULL when calling function
   indirectly or considering a libcall.  Otherwise return 0.  */

static int
ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
{
  gcc_assert (!TARGET_64BIT);

  /* Use SSE registers to pass SFmode and DFmode arguments if requested
     by the sseregparm attribute.  */
  if (TARGET_SSEREGPARM
      || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
    {
      if (!TARGET_SSE)
	{
	  if (warn)
	    {
	      if (decl)
		error ("calling %qD with attribute sseregparm without "
		       "SSE/SSE2 enabled", decl);
	      else
		error ("calling %qT with attribute sseregparm without "
		       "SSE/SSE2 enabled", type);
	    }
	  return 0;
	}

      return 2;
    }

  /* For local functions, pass up to SSE_REGPARM_MAX SFmode
     (and DFmode for SSE2) arguments in SSE registers.  */
  if (decl && TARGET_SSE_MATH && optimize
      && !(profile_flag && !flag_fentry))
    {
      /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified.  */
      struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
      if (i && i->local && i->can_change_signature)
	return TARGET_SSE2 ? 2 : 1;
    }

  return 0;
}
/* Return true if EAX is live at the start of the function.  Used by
   ix86_expand_prologue to determine if we need special help before
   calling allocate_stack_worker.  */

static bool
ix86_eax_live_at_start_p (void)
{
  /* Cheat.  Don't bother working forward from ix86_function_regparm
     to the function type to whether an actual argument is located in
     eax.  Instead just look at cfg info, which is still close enough
     to correct at this point.  This gives false positives for broken
     functions that might use uninitialized data that happens to be
     allocated in eax, but who cares?  */
  return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
}
static bool
ix86_keep_aggregate_return_pointer (tree fntype)
{
  tree attr;

  if (!TARGET_64BIT)
    {
      attr = lookup_attribute ("callee_pop_aggregate_return",
			       TYPE_ATTRIBUTES (fntype));
      if (attr)
	return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);

      /* For 32-bit MS-ABI the default is to keep aggregate
	 return pointer.  */
      if (ix86_function_type_abi (fntype) == MS_ABI)
	return true;
    }
  return KEEP_AGGREGATE_RETURN_POINTER != 0;
}
/* Value is the number of bytes of arguments automatically
   popped when returning from a subroutine call.
   FUNDECL is the declaration node of the function (as a tree),
   FUNTYPE is the data type of the function (as a tree),
   or for a library call it is an identifier node for the subroutine name.
   SIZE is the number of bytes of arguments passed on the stack.

   On the 80386, the RTD insn may be used to pop them if the number
   of args is fixed, but if the number is variable then the caller
   must pop them all.  RTD can't be used for library calls now
   because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.

   The attribute stdcall is equivalent to RTD on a per module basis.  */

static int
ix86_return_pops_args (tree fundecl, tree funtype, int size)
{
  unsigned int ccvt;

  /* None of the 64-bit ABIs pop arguments.  */
  if (TARGET_64BIT)
    return 0;

  ccvt = ix86_get_callcvt (funtype);

  if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
	       | IX86_CALLCVT_THISCALL)) != 0
      && ! stdarg_p (funtype))
    return size;

  /* Lose any fake structure return argument if it is passed on the stack.  */
  if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
      && !ix86_keep_aggregate_return_pointer (funtype))
    {
      int nregs = ix86_function_regparm (funtype, fundecl);
      if (nregs == 0)
	return GET_MODE_SIZE (Pmode);
    }

  return 0;
}
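
/* For illustration only: with the declaration

       void __attribute__ ((stdcall)) f (int a, int b);

   SIZE is 8 at f's return sites, so f returns with "ret $8" and the
   caller pops nothing; a cdecl f returns with a plain "ret" and the
   caller adjusts %esp itself.  */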
/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook.  */

static bool
ix86_legitimate_combined_insn (rtx insn)
{
  /* Check operand constraints in case hard registers were propagated
     into insn pattern.  This check prevents combine pass from
     generating insn patterns with invalid hard register operands.
     These invalid insns can eventually confuse reload to error out
     with a spill failure.  See also PRs 46829 and 46843.  */
  if ((INSN_CODE (insn) = recog (PATTERN (insn), insn, 0)) >= 0)
    {
      int i;

      extract_insn (insn);
      preprocess_constraints ();

      for (i = 0; i < recog_data.n_operands; i++)
	{
	  rtx op = recog_data.operand[i];
	  enum machine_mode mode = GET_MODE (op);
	  struct operand_alternative *op_alt;
	  int offset = 0;
	  bool win;
	  int j;

	  /* A unary operator may be accepted by the predicate, but it
	     is irrelevant for matching constraints.  */
	  if (UNARY_P (op))
	    op = XEXP (op, 0);

	  if (GET_CODE (op) == SUBREG)
	    {
	      if (REG_P (SUBREG_REG (op))
		  && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
		offset = subreg_regno_offset (REGNO (SUBREG_REG (op)),
					      GET_MODE (SUBREG_REG (op)),
					      SUBREG_BYTE (op),
					      GET_MODE (op));
	      op = SUBREG_REG (op);
	    }

	  if (!(REG_P (op) && HARD_REGISTER_P (op)))
	    continue;

	  op_alt = recog_op_alt[i];

	  /* Operand has no constraints, anything is OK.  */
	  win = !recog_data.n_alternatives;

	  for (j = 0; j < recog_data.n_alternatives; j++)
	    {
	      if (op_alt[j].anything_ok
		  || (op_alt[j].matches != -1
		      && operands_match_p
			   (recog_data.operand[i],
			    recog_data.operand[op_alt[j].matches]))
		  || reg_fits_class_p (op, op_alt[j].cl, offset, mode))
		{
		  win = true;
		  break;
		}
	    }

	  if (!win)
	    return false;
	}
    }

  return true;
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
ix86_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_LP64 ? 44 : 29);
}
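
/* For illustration only: AddressSanitizer maps an application address
   ADDR to its shadow byte as

       shadow = (addr >> 3) + ix86_asan_shadow_offset ()

   i.e. the offset above is 1<<44 for 64-bit LP64 targets and 1<<29 for
   32-bit targets.  */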
/* Argument support functions.  */

/* Return true when register may be used to pass function parameters.  */

bool
ix86_function_arg_regno_p (int regno)
{
  int i;
  const int *parm_regs;

  if (!TARGET_64BIT)
    {
      if (TARGET_MACHO)
	return (regno < REGPARM_MAX
		|| (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
      else
	return (regno < REGPARM_MAX
		|| (TARGET_MMX && MMX_REGNO_P (regno)
		    && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
		|| (TARGET_SSE && SSE_REGNO_P (regno)
		    && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
    }

  if (TARGET_MACHO)
    {
      if (SSE_REGNO_P (regno) && TARGET_SSE)
	return true;
    }
  else
    {
      if (TARGET_SSE && SSE_REGNO_P (regno)
	  && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
	return true;
    }

  /* TODO: The function should depend on current function ABI but
     builtins.c would need updating then.  Therefore we use the
     default ABI.  */

  /* RAX is used as hidden argument to va_arg functions.  */
  if (ix86_abi == SYSV_ABI && regno == AX_REG)
    return true;

  if (ix86_abi == MS_ABI)
    parm_regs = x86_64_ms_abi_int_parameter_registers;
  else
    parm_regs = x86_64_int_parameter_registers;
  for (i = 0; i < (ix86_abi == MS_ABI
		   ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
    if (regno == parm_regs[i])
      return true;
  return false;
}
/* Return if we do not know how to pass TYPE solely in registers.  */

static bool
ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (must_pass_in_stack_var_size_or_pad (mode, type))
    return true;

  /* For 32-bit, we want TImode aggregates to go on the stack.  But watch out!
     The layout_type routine is crafty and tries to trick us into passing
     currently unsupported vector types on the stack by using TImode.  */
  return (!TARGET_64BIT && mode == TImode
	  && type && TREE_CODE (type) != VECTOR_TYPE);
}
/* Return the size, in bytes, of the area reserved for arguments passed
   in registers for the function represented by FNDECL, depending on the
   ABI in use.  */
int
ix86_reg_parm_stack_space (const_tree fndecl)
{
  enum calling_abi call_abi = SYSV_ABI;
  if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
    call_abi = ix86_function_abi (fndecl);
  else
    call_abi = ix86_function_type_abi (fndecl);
  if (TARGET_64BIT && call_abi == MS_ABI)
    return 32;
  return 0;
}
/* Returns SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
   call ABI used.  */
enum calling_abi
ix86_function_type_abi (const_tree fntype)
{
  if (fntype != NULL_TREE && TYPE_ATTRIBUTES (fntype) != NULL_TREE)
    {
      enum calling_abi abi = ix86_abi;
      if (abi == SYSV_ABI)
	{
	  if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
	    abi = MS_ABI;
	}
      else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
	abi = SYSV_ABI;
      return abi;
    }
  return ix86_abi;
}
static bool
ix86_function_ms_hook_prologue (const_tree fn)
{
  if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
    {
      if (decl_function_context (fn) != NULL_TREE)
	error_at (DECL_SOURCE_LOCATION (fn),
		  "ms_hook_prologue is not compatible with nested function");
      else
	return true;
    }
  return false;
}
static enum calling_abi
ix86_function_abi (const_tree fndecl)
{
  if (! fndecl)
    return ix86_abi;
  return ix86_function_type_abi (TREE_TYPE (fndecl));
}
/* Returns SYSV_ABI or MS_ABI, depending on cfun, specifying the
   call ABI used.  */
enum calling_abi
ix86_cfun_abi (void)
{
  if (! cfun)
    return ix86_abi;
  return cfun->machine->call_abi;
}
/* Write the extra assembler code needed to declare a function properly.  */

void
ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
				tree decl)
{
  bool is_ms_hook = ix86_function_ms_hook_prologue (decl);

  if (is_ms_hook)
    {
      int i, filler_count = (TARGET_64BIT ? 32 : 16);
      unsigned int filler_cc = 0xcccccccc;

      for (i = 0; i < filler_count; i += 4)
	fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
    }

#ifdef SUBTARGET_ASM_UNWIND_INIT
  SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
#endif

  ASM_OUTPUT_LABEL (asm_out_file, fname);

  /* Output magic byte marker, if hot-patch attribute is set.  */
  if (is_ms_hook)
    {
      if (TARGET_64BIT)
	{
	  /* leaq [%rsp + 0], %rsp  */
	  asm_fprintf (asm_out_file, ASM_BYTE
		       "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
	}
      else
	{
	  /* movl.s %edi, %edi
	     push   %ebp
	     movl.s %esp, %ebp  */
	  asm_fprintf (asm_out_file, ASM_BYTE
		       "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
	}
    }
}
extern void init_regs (void);

/* Implementation of the call-ABI-switching target hook.  The call
   register sets specific to FNDECL are installed.  See also
   ix86_conditional_register_usage for more details.  */
void
ix86_call_abi_override (const_tree fndecl)
{
  if (fndecl == NULL_TREE)
    cfun->machine->call_abi = ix86_abi;
  else
    cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
}
/* 64-bit MS and SYSV ABI have different sets of call-used registers.  Avoid
   expensive re-initialization of init_regs each time we switch function
   context since this is needed only during RTL expansion.  */
static void
ix86_maybe_switch_abi (void)
{
  if (TARGET_64BIT &&
      call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
    reinit_regs ();
}
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum,  /* Argument info to initialize */
		      tree fntype,	/* tree ptr for function decl */
		      rtx libname,	/* SYMBOL_REF of library name or 0 */
		      tree fndecl,
		      int caller)
{
  struct cgraph_local_info *i;

  memset (cum, 0, sizeof (*cum));

  if (fndecl)
    {
      i = cgraph_local_info (fndecl);
      cum->call_abi = ix86_function_abi (fndecl);
    }
  else
    {
      i = NULL;
      cum->call_abi = ix86_function_type_abi (fntype);
    }

  cum->caller = caller;

  /* Set up the number of registers to use for passing arguments.  */

  if (TARGET_64BIT && cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
    sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
	   "or subtarget optimization implying it");
  cum->nregs = ix86_regparm;
  if (TARGET_64BIT)
    {
      cum->nregs = (cum->call_abi == SYSV_ABI
		    ? X86_64_REGPARM_MAX
		    : X86_64_MS_REGPARM_MAX);
    }
  if (TARGET_SSE)
    {
      cum->sse_nregs = SSE_REGPARM_MAX;
      if (TARGET_64BIT)
	{
	  cum->sse_nregs = (cum->call_abi == SYSV_ABI
			    ? X86_64_SSE_REGPARM_MAX
			    : X86_64_MS_SSE_REGPARM_MAX);
	}
    }
  if (TARGET_MMX)
    cum->mmx_nregs = MMX_REGPARM_MAX;
  cum->warn_avx = true;
  cum->warn_sse = true;
  cum->warn_mmx = true;

  /* Because type might mismatch in between caller and callee, we need to
     use actual type of function for local calls.
     FIXME: cgraph_analyze can be told to actually record if function uses
     va_start so for local functions maybe_vaarg can be made more aggressive.
     FIXME: once the type system is fixed, we won't need this code anymore.  */
  if (i && i->local && i->can_change_signature)
    fntype = TREE_TYPE (fndecl);
  cum->maybe_vaarg = (fntype
		      ? (!prototype_p (fntype) || stdarg_p (fntype))
		      : !libname);

  if (!TARGET_64BIT)
    {
      /* If there are variable arguments, then we won't pass anything
	 in registers in 32-bit mode.  */
      if (stdarg_p (fntype))
	{
	  cum->nregs = 0;
	  cum->sse_nregs = 0;
	  cum->mmx_nregs = 0;
	  cum->warn_avx = 0;
	  cum->warn_sse = 0;
	  cum->warn_mmx = 0;
	  return;
	}

      /* Use ecx and edx registers if function has fastcall attribute,
	 else look for regparm information.  */
      if (fntype)
	{
	  unsigned int ccvt = ix86_get_callcvt (fntype);
	  if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	    {
	      cum->nregs = 1;
	      cum->fastcall = 1; /* Same first register as in fastcall.  */
	    }
	  else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	    {
	      cum->nregs = 2;
	      cum->fastcall = 1;
	    }
	  else
	    cum->nregs = ix86_function_regparm (fntype, fndecl);
	}

      /* Set up the number of SSE registers used for passing SFmode
	 and DFmode arguments.  Warn for mismatching ABI.  */
      cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
    }
}
/* Return the "natural" mode for TYPE.  In most cases, this is just TYPE_MODE.
   But in the case of vector types, it is some vector mode.

   When we have only some of our vector isa extensions enabled, then there
   are some modes for which vector_mode_supported_p is false.  For these
   modes, the generic vector support in gcc will choose some non-vector mode
   in order to implement the type.  By computing the natural mode, we'll
   select the proper ABI location for the operand and not depend on whatever
   the middle-end decides to do with these vector types.

   The middle-end can't deal with the vector types > 16 bytes.  In this
   case, we return the original mode and warn ABI change if CUM isn't
   NULL.  */

static enum machine_mode
type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if ((size == 8 || size == 16 || size == 32)
	  /* ??? Generic code allows us to create width 1 vectors.  Ignore.  */
	  && TYPE_VECTOR_SUBPARTS (type) > 1)
	{
	  enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));

	  if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
	    mode = MIN_MODE_VECTOR_FLOAT;
	  else
	    mode = MIN_MODE_VECTOR_INT;

	  /* Get the mode which has this inner mode and number of units.  */
	  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
	    if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
		&& GET_MODE_INNER (mode) == innermode)
	      {
		if (size == 32 && !TARGET_AVX)
		  {
		    static bool warnedavx;

		    if (cum && !warnedavx && cum->warn_avx)
		      {
			warnedavx = true;
			warning (0, "AVX vector argument without AVX "
				 "enabled changes the ABI");
		      }
		    return TYPE_MODE (type);
		  }
		else if ((size == 8 || size == 16) && !TARGET_SSE)
		  {
		    static bool warnedsse;

		    if (cum && !warnedsse && cum->warn_sse)
		      {
			warnedsse = true;
			warning (0, "SSE vector argument without SSE "
				 "enabled changes the ABI");
		      }
		    return mode;
		  }
		else
		  return mode;
	      }

	  gcc_unreachable ();
	}
    }

  return mode;
}
/* We want to pass a value in REGNO whose "natural" mode is MODE.  However,
   this may not agree with the mode that the type system has chosen for the
   register, which is ORIG_MODE.  If ORIG_MODE is not BLKmode, then we can
   go ahead and use it.  Otherwise we have to build a PARALLEL instead.  */

static rtx
gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
		     unsigned int regno)
{
  rtx tmp;

  if (orig_mode != BLKmode)
    tmp = gen_rtx_REG (orig_mode, regno);
  else
    {
      tmp = gen_rtx_REG (mode, regno);
      tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
    }

  return tmp;
}
/* x86-64 register passing implementation.  See x86-64 ABI for details.  Goal
   of this code is to classify each 8bytes of incoming argument by the register
   class and assign registers accordingly.  */

/* Return the union class of CLASS1 and CLASS2.
   See the x86-64 PS ABI for details.  */

static enum x86_64_reg_class
merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
{
  /* Rule #1: If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
     the other class.  */
  if (class1 == X86_64_NO_CLASS)
    return class2;
  if (class2 == X86_64_NO_CLASS)
    return class1;

  /* Rule #3: If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #4: If one of the classes is INTEGER, the result is INTEGER.  */
  if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
      || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
    return X86_64_INTEGERSI_CLASS;
  if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
      || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
    return X86_64_INTEGER_CLASS;

  /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
     MEMORY is used.  */
  if (class1 == X86_64_X87_CLASS
      || class1 == X86_64_X87UP_CLASS
      || class1 == X86_64_COMPLEX_X87_CLASS
      || class2 == X86_64_X87_CLASS
      || class2 == X86_64_X87UP_CLASS
      || class2 == X86_64_COMPLEX_X87_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #6: Otherwise class SSE is used.  */
  return X86_64_SSE_CLASS;
}
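
/* A worked example of the rules above (illustrative only): for

       struct s { double d; int i; };

   the first eightbyte contains only D and classifies as SSEDF; the
   second contains only I and classifies as INTEGERSI.  The struct is
   therefore passed in one SSE register and one integer register.  */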
/* Classify the argument of type TYPE and mode MODE.
   CLASSES will be filled by the register class used to pass each word
   of the operand.  The number of words is returned.  In case the parameter
   should be passed in memory, 0 is returned.  As a special case for zero
   sized containers, classes[0] will be NO_CLASS and 1 is returned.

   BIT_OFFSET is used internally for handling records and specifies offset
   of the offset in bits modulo 256 to avoid overflow cases.

   See the x86-64 PS ABI for details.  */

static int
classify_argument (enum machine_mode mode, const_tree type,
		   enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
{
  HOST_WIDE_INT bytes =
    (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
  int words
    = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Variable sized entities are always passed/returned in memory.  */
  if (bytes < 0)
    return 0;

  if (mode != VOIDmode
      && targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  if (type && AGGREGATE_TYPE_P (type))
    {
      int i;
      tree field;
      enum x86_64_reg_class subclasses[MAX_CLASSES];

      /* On x86-64 we pass structures larger than 32 bytes on the stack.  */
      if (bytes > 32)
	return 0;

      for (i = 0; i < words; i++)
	classes[i] = X86_64_NO_CLASS;

      /* Zero sized arrays or structures are NO_CLASS.  We return 0 to
	 signalize memory class, so handle it as special case.  */
      if (!words)
	{
	  classes[0] = X86_64_NO_CLASS;
	  return 1;
	}

      /* Classify each field of record and merge classes.  */
      switch (TREE_CODE (type))
	{
	case RECORD_TYPE:
	  /* And now merge the fields of structure.  */
	  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	    {
	      if (TREE_CODE (field) == FIELD_DECL)
		{
		  int num;

		  if (TREE_TYPE (field) == error_mark_node)
		    continue;

		  /* Bitfields are always classified as integer.  Handle them
		     early, since later code would consider them to be
		     misaligned integers.  */
		  if (DECL_BIT_FIELD (field))
		    {
		      for (i = (int_bit_position (field)
				+ (bit_offset % 64)) / 8 / 8;
			   i < ((int_bit_position (field) + (bit_offset % 64))
				+ tree_low_cst (DECL_SIZE (field), 0)
				+ 63) / 8 / 8; i++)
			classes[i]
			  = merge_classes (X86_64_INTEGER_CLASS,
					   classes[i]);
		    }
		  else
		    {
		      int pos;

		      type = TREE_TYPE (field);

		      /* Flexible array member is ignored.  */
		      if (TYPE_MODE (type) == BLKmode
			  && TREE_CODE (type) == ARRAY_TYPE
			  && TYPE_SIZE (type) == NULL_TREE
			  && TYPE_DOMAIN (type) != NULL_TREE
			  && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
			      == NULL_TREE))
			{
			  static bool warned;

			  if (!warned && warn_psabi)
			    {
			      warned = true;
			      inform (input_location,
				      "the ABI of passing struct with"
				      " a flexible array member has"
				      " changed in GCC 4.4");
			    }
			  continue;
			}
		      num = classify_argument (TYPE_MODE (type), type,
					       subclasses,
					       (int_bit_position (field)
						+ bit_offset) % 256);
		      if (!num)
			return 0;
		      pos = (int_bit_position (field)
			     + (bit_offset % 64)) / 8 / 8;
		      for (i = 0; i < num && (i + pos) < words; i++)
			classes[i + pos]
			  = merge_classes (subclasses[i], classes[i + pos]);
		    }
		}
	    }
	  break;

	case ARRAY_TYPE:
	  /* Arrays are handled as small records.  */
	  {
	    int num;
	    num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
				     TREE_TYPE (type), subclasses, bit_offset);
	    if (!num)
	      return 0;

	    /* The partial classes are now full classes.  */
	    if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
	      subclasses[0] = X86_64_SSE_CLASS;
	    if (subclasses[0] == X86_64_INTEGERSI_CLASS
		&& !((bit_offset % 64) == 0 && bytes == 4))
	      subclasses[0] = X86_64_INTEGER_CLASS;

	    for (i = 0; i < words; i++)
	      classes[i] = subclasses[i % num];

	    break;
	  }
	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	  /* Unions are similar to RECORD_TYPE but offset is always 0.  */
	  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	    {
	      if (TREE_CODE (field) == FIELD_DECL)
		{
		  int num;

		  if (TREE_TYPE (field) == error_mark_node)
		    continue;

		  num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
					   TREE_TYPE (field), subclasses,
					   bit_offset);
		  if (!num)
		    return 0;
		  for (i = 0; i < num; i++)
		    classes[i] = merge_classes (subclasses[i], classes[i]);
		}
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (words > 2)
	{
	  /* When size > 16 bytes, if the first one isn't
	     X86_64_SSE_CLASS or any other ones aren't
	     X86_64_SSEUP_CLASS, everything should be passed in
	     memory.  */
	  if (classes[0] != X86_64_SSE_CLASS)
	    return 0;

	  for (i = 1; i < words; i++)
	    if (classes[i] != X86_64_SSEUP_CLASS)
	      return 0;
	}

      /* Final merger cleanup.  */
      for (i = 0; i < words; i++)
	{
	  /* If one class is MEMORY, everything should be passed in
	     memory.  */
	  if (classes[i] == X86_64_MEMORY_CLASS)
	    return 0;

	  /* The X86_64_SSEUP_CLASS should be always preceded by
	     X86_64_SSE_CLASS or X86_64_SSEUP_CLASS.  */
	  if (classes[i] == X86_64_SSEUP_CLASS
	      && classes[i - 1] != X86_64_SSE_CLASS
	      && classes[i - 1] != X86_64_SSEUP_CLASS)
	    {
	      /* The first one should never be X86_64_SSEUP_CLASS.  */
	      gcc_assert (i != 0);
	      classes[i] = X86_64_SSE_CLASS;
	    }

	  /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
	     everything should be passed in memory.  */
	  if (classes[i] == X86_64_X87UP_CLASS
	      && (classes[i - 1] != X86_64_X87_CLASS))
	    {
	      static bool warned;

	      /* The first one should never be X86_64_X87UP_CLASS.  */
	      gcc_assert (i != 0);
	      if (!warned && warn_psabi)
		{
		  warned = true;
		  inform (input_location,
			  "the ABI of passing union with long double"
			  " has changed in GCC 4.4");
		}
	      return 0;
	    }
	}
      return words;
    }

  /* Compute alignment needed.  We align all types to natural boundaries with
     exception of XFmode that is aligned to 64bits.  */
  if (mode != VOIDmode && mode != BLKmode)
    {
      int mode_alignment = GET_MODE_BITSIZE (mode);

      if (mode == XFmode)
	mode_alignment = 128;
      else if (mode == XCmode)
	mode_alignment = 256;
      if (COMPLEX_MODE_P (mode))
	mode_alignment /= 2;
      /* Misaligned fields are always returned in memory.  */
      if (bit_offset % mode_alignment)
	return 0;
    }

  /* for V1xx modes, just use the base mode */
  if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
      && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
    mode = GET_MODE_INNER (mode);

  /* Classification of atomic types.  */
  switch (mode)
    {
    case SDmode:
    case DDmode:
      classes[0] = X86_64_SSE_CLASS;
      return 1;
    case TDmode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case DImode: case SImode: case HImode: case QImode:
    case CSImode: case CHImode: case CQImode:
      {
	int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);

	if (size <= 32)
	  {
	    classes[0] = X86_64_INTEGERSI_CLASS;
	    return 1;
	  }
	else if (size <= 64)
	  {
	    classes[0] = X86_64_INTEGER_CLASS;
	    return 1;
	  }
	else if (size <= 64+32)
	  {
	    classes[0] = X86_64_INTEGER_CLASS;
	    classes[1] = X86_64_INTEGERSI_CLASS;
	    return 2;
	  }
	else if (size <= 64+64)
	  {
	    classes[0] = classes[1] = X86_64_INTEGER_CLASS;
	    return 2;
	  }
	else
	  gcc_unreachable ();
      }
    case CDImode: case TImode:
      classes[0] = classes[1] = X86_64_INTEGER_CLASS;
      return 2;
    case COImode: case OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();
    case CTImode:
      return 0;
    case SFmode:
      if (!(bit_offset % 64))
	classes[0] = X86_64_SSESF_CLASS;
      else
	classes[0] = X86_64_SSE_CLASS;
      return 1;
    case DFmode:
      classes[0] = X86_64_SSEDF_CLASS;
      return 1;
    case XFmode:
      classes[0] = X86_64_X87_CLASS;
      classes[1] = X86_64_X87UP_CLASS;
      return 2;
    case TFmode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case SCmode:
      classes[0] = X86_64_SSE_CLASS;
      if (!(bit_offset % 64))
	return 1;
      else
	{
	  static bool warned;

	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the ABI of passing structure with complex float"
		      " member has changed in GCC 4.4");
	    }
	  classes[1] = X86_64_SSESF_CLASS;
	  return 2;
	}
    case DCmode:
      classes[0] = X86_64_SSEDF_CLASS;
      classes[1] = X86_64_SSEDF_CLASS;
      return 2;
    case XCmode:
      classes[0] = X86_64_COMPLEX_X87_CLASS;
      return 1;
    case TCmode:
      /* This mode is larger than 16 bytes.  */
      return 0;
    case V8SFmode: case V8SImode: case V32QImode: case V16HImode:
    case V4DFmode: case V4DImode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      classes[2] = X86_64_SSEUP_CLASS;
      classes[3] = X86_64_SSEUP_CLASS;
      return 4;
    case V4SFmode: case V4SImode: case V16QImode: case V8HImode:
    case V2DFmode: case V2DImode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case V1TImode: case V1DImode:
    case V2SFmode: case V2SImode: case V4HImode: case V8QImode:
      classes[0] = X86_64_SSE_CLASS;
      return 1;
    case BLKmode:
    case VOIDmode:
      return 0;

    default:
      gcc_assert (VECTOR_MODE_P (mode));

      if (bytes > 16)
	return 0;

      gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);

      if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
	classes[0] = X86_64_INTEGERSI_CLASS;
      else
	classes[0] = X86_64_INTEGER_CLASS;
      classes[1] = X86_64_INTEGER_CLASS;
      return 1 + (bytes > 8);
    }
}
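
/* For illustration only: a 32-byte __m256 value classifies as
   { SSE, SSEUP, SSEUP, SSEUP } and travels in a single %ymm register;
   a 16-byte __m128 classifies as { SSE, SSEUP } and travels in one
   %xmm register.  */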
/* Examine the argument and return set number of register required in each
   class.  Return 0 iff parameter should be passed in memory.  */

static int
examine_argument (enum machine_mode mode, const_tree type, int in_return,
		  int *int_nregs, int *sse_nregs)
{
  enum x86_64_reg_class regclass[MAX_CLASSES];
  int n = classify_argument (mode, type, regclass, 0);

  *int_nregs = 0;
  *sse_nregs = 0;
  if (!n)
    return 0;
  for (n--; n >= 0; n--)
    switch (regclass[n])
      {
      case X86_64_INTEGER_CLASS:
      case X86_64_INTEGERSI_CLASS:
	(*int_nregs)++;
	break;
      case X86_64_SSE_CLASS:
      case X86_64_SSESF_CLASS:
      case X86_64_SSEDF_CLASS:
	(*sse_nregs)++;
	break;
      case X86_64_NO_CLASS:
      case X86_64_SSEUP_CLASS:
	break;
      case X86_64_X87_CLASS:
      case X86_64_X87UP_CLASS:
	if (!in_return)
	  return 0;
	break;
      case X86_64_COMPLEX_X87_CLASS:
	return in_return ? 2 : 0;
      case X86_64_MEMORY_CLASS:
	gcc_unreachable ();
      }
  return 1;
}
/* Construct container for the argument used by GCC interface.  See
   FUNCTION_ARG for the detailed description.  */

static rtx
construct_container (enum machine_mode mode, enum machine_mode orig_mode,
		     const_tree type, int in_return, int nintregs, int nsseregs,
		     const int *intreg, int sse_regno)
{
  /* The following variables hold the static issued_error state.  */
  static bool issued_sse_arg_error;
  static bool issued_sse_ret_error;
  static bool issued_x87_ret_error;

  enum machine_mode tmpmode;
  int bytes =
    (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
  enum x86_64_reg_class regclass[MAX_CLASSES];
  int n;
  int i;
  int nexps = 0;
  int needed_sseregs, needed_intregs;
  rtx exp[MAX_CLASSES];
  rtx ret;

  n = classify_argument (mode, type, regclass, 0);
  if (!n)
    return NULL;
  if (!examine_argument (mode, type, in_return, &needed_intregs,
			 &needed_sseregs))
    return NULL;
  if (needed_intregs > nintregs || needed_sseregs > nsseregs)
    return NULL;

  /* We allowed the user to turn off SSE for kernel mode.  Don't crash if
     some less clueful developer tries to use floating-point anyway.  */
  if (needed_sseregs && !TARGET_SSE)
    {
      if (in_return)
	{
	  if (!issued_sse_ret_error)
	    {
	      error ("SSE register return with SSE disabled");
	      issued_sse_ret_error = true;
	    }
	}
      else if (!issued_sse_arg_error)
	{
	  error ("SSE register argument with SSE disabled");
	  issued_sse_arg_error = true;
	}
      return NULL;
    }

  /* Likewise, error if the ABI requires us to return values in the
     x87 registers and the user specified -mno-80387.  */
  if (!TARGET_80387 && in_return)
    for (i = 0; i < n; i++)
      if (regclass[i] == X86_64_X87_CLASS
	  || regclass[i] == X86_64_X87UP_CLASS
	  || regclass[i] == X86_64_COMPLEX_X87_CLASS)
	{
	  if (!issued_x87_ret_error)
	    {
	      error ("x87 register return with x87 disabled");
	      issued_x87_ret_error = true;
	    }
	  return NULL;
	}

  /* First construct simple cases.  Avoid SCmode, since we want to use
     single register to pass this type.  */
  if (n == 1 && mode != SCmode)
    switch (regclass[0])
      {
      case X86_64_INTEGER_CLASS:
      case X86_64_INTEGERSI_CLASS:
	return gen_rtx_REG (mode, intreg[0]);
      case X86_64_SSE_CLASS:
      case X86_64_SSESF_CLASS:
      case X86_64_SSEDF_CLASS:
	if (mode != BLKmode)
	  return gen_reg_or_parallel (mode, orig_mode,
				      SSE_REGNO (sse_regno));
	break;
      case X86_64_X87_CLASS:
      case X86_64_COMPLEX_X87_CLASS:
	return gen_rtx_REG (mode, FIRST_STACK_REG);
      case X86_64_NO_CLASS:
	/* Zero sized array, struct or class.  */
	return NULL;
      default:
	gcc_unreachable ();
      }
  if (n == 2
      && regclass[0] == X86_64_SSE_CLASS
      && regclass[1] == X86_64_SSEUP_CLASS
      && mode != BLKmode)
    return gen_reg_or_parallel (mode, orig_mode,
				SSE_REGNO (sse_regno));
  if (n == 4
      && regclass[0] == X86_64_SSE_CLASS
      && regclass[1] == X86_64_SSEUP_CLASS
      && regclass[2] == X86_64_SSEUP_CLASS
      && regclass[3] == X86_64_SSEUP_CLASS
      && mode != BLKmode)
    return gen_reg_or_parallel (mode, orig_mode,
				SSE_REGNO (sse_regno));
  if (n == 2
      && regclass[0] == X86_64_X87_CLASS
      && regclass[1] == X86_64_X87UP_CLASS)
    return gen_rtx_REG (XFmode, FIRST_STACK_REG);

  if (n == 2
      && regclass[0] == X86_64_INTEGER_CLASS
      && regclass[1] == X86_64_INTEGER_CLASS
      && (mode == CDImode || mode == TImode || mode == TFmode)
      && intreg[0] + 1 == intreg[1])
    return gen_rtx_REG (mode, intreg[0]);

  /* Otherwise figure out the entries of the PARALLEL.  */
  for (i = 0; i < n; i++)
    {
      int pos;

      switch (regclass[i])
	{
	case X86_64_NO_CLASS:
	  break;
	case X86_64_INTEGER_CLASS:
	case X86_64_INTEGERSI_CLASS:
	  /* Merge TImodes on aligned occasions here too.  */
	  if (i * 8 + 8 > bytes)
	    tmpmode
	      = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
	  else if (regclass[i] == X86_64_INTEGERSI_CLASS)
	    tmpmode = SImode;
	  else
	    tmpmode = DImode;
	  /* We've requested 24 bytes we
	     don't have mode for.  Use DImode.  */
	  if (tmpmode == BLKmode)
	    tmpmode = DImode;
	  exp[nexps++]
	    = gen_rtx_EXPR_LIST (VOIDmode,
				 gen_rtx_REG (tmpmode, *intreg),
				 GEN_INT (i * 8));
	  intreg++;
	  break;
	case X86_64_SSESF_CLASS:
	  exp[nexps++]
	    = gen_rtx_EXPR_LIST (VOIDmode,
				 gen_rtx_REG (SFmode,
					      SSE_REGNO (sse_regno)),
				 GEN_INT (i * 8));
	  sse_regno++;
	  break;
	case X86_64_SSEDF_CLASS:
	  exp[nexps++]
	    = gen_rtx_EXPR_LIST (VOIDmode,
				 gen_rtx_REG (DFmode,
					      SSE_REGNO (sse_regno)),
				 GEN_INT (i * 8));
	  sse_regno++;
	  break;
	case X86_64_SSE_CLASS:
	  pos = i;
	  switch (n)
	    {
	    case 1:
	      tmpmode = DImode;
	      break;
	    case 2:
	      if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
		{
		  tmpmode = TImode;
		  i++;
		}
	      else
		tmpmode = DImode;
	      break;
	    case 4:
	      gcc_assert (i == 0
			  && regclass[1] == X86_64_SSEUP_CLASS
			  && regclass[2] == X86_64_SSEUP_CLASS
			  && regclass[3] == X86_64_SSEUP_CLASS);
	      tmpmode = OImode;
	      i += 3;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  exp[nexps++]
	    = gen_rtx_EXPR_LIST (VOIDmode,
				 gen_rtx_REG (tmpmode,
					      SSE_REGNO (sse_regno)),
				 GEN_INT (pos * 8));
	  sse_regno++;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Empty aligned struct, union or class.  */
  if (nexps == 0)
    return NULL;

  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
  for (i = 0; i < nexps; i++)
    XVECEXP (ret, 0, i) = exp[i];

  return ret;
}
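
/* For illustration only: for

       struct s { double d; long l; };

   classify_argument yields { SSEDF, INTEGER } and the PARALLEL built
   above is, roughly,

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI di)   (const_int 8))])

   assuming %xmm0 and %rdi are the next free registers.  */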
/* Update the data in CUM to advance over an argument of mode MODE
   and data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)  */

static void
function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 const_tree type, HOST_WIDE_INT bytes,
			 HOST_WIDE_INT words)
{
  switch (mode)
    {
    default:
      break;

    case BLKmode:
      if (bytes < 0)
	break;
      /* FALLTHRU */

    case DImode: case SImode: case HImode: case QImode:
      cum->words += words;
      cum->nregs -= words;
      cum->regno += words;

      if (cum->nregs <= 0)
	{
	  cum->nregs = 0;
	  cum->regno = 0;
	}
      break;

    case OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();

    case DFmode:
      if (cum->float_in_sse < 2)
	break;
    case SFmode:
      if (cum->float_in_sse < 1)
	break;
      /* FALLTHRU */

    case V8SFmode: case V8SImode: case V32QImode: case V16HImode:
    case V4DFmode: case V4DImode:
    case TImode: case V16QImode: case V8HImode: case V4SImode:
    case V2DImode: case V4SFmode: case V2DFmode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  cum->sse_words += words;
	  cum->sse_nregs -= 1;
	  cum->sse_regno += 1;
	  if (cum->sse_nregs <= 0)
	    {
	      cum->sse_nregs = 0;
	      cum->sse_regno = 0;
	    }
	}
      break;

    case V8QImode: case V4HImode: case V2SImode: case V2SFmode:
    case V1TImode: case V1DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  cum->mmx_words += words;
	  cum->mmx_nregs -= 1;
	  cum->mmx_regno += 1;
	  if (cum->mmx_nregs <= 0)
	    {
	      cum->mmx_nregs = 0;
	      cum->mmx_regno = 0;
	    }
	}
      break;
    }
}
static void
function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 const_tree type, HOST_WIDE_INT words, bool named)
{
  int int_nregs, sse_nregs;

  /* Unnamed 256bit vector mode parameters are passed on stack.  */
  if (!named && VALID_AVX256_REG_MODE (mode))
    return;

  if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
      && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
    {
      cum->nregs -= int_nregs;
      cum->sse_nregs -= sse_nregs;
      cum->regno += int_nregs;
      cum->sse_regno += sse_nregs;
    }
  else
    {
      int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
      cum->words = (cum->words + align - 1) & ~(align - 1);
      cum->words += words;
    }
}
static void
function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
			    HOST_WIDE_INT words)
{
  /* Otherwise, this should be passed indirect.  */
  gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);

  cum->words += words;
  if (cum->nregs > 0)
    {
      cum->nregs -= 1;
      cum->regno += 1;
    }
}
/* Update the data in CUM to advance over an argument of mode MODE and
   data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)  */

static void
ix86_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  HOST_WIDE_INT bytes, words;

  if (mode == BLKmode)
    bytes = int_size_in_bytes (type);
  else
    bytes = GET_MODE_SIZE (mode);
  words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (type)
    mode = type_natural_mode (type, NULL);

  if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
    function_arg_advance_ms_64 (cum, bytes, words);
  else if (TARGET_64BIT)
    function_arg_advance_64 (cum, mode, type, words, named);
  else
    function_arg_advance_32 (cum, mode, type, bytes, words);
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
   (otherwise it is an extra parameter matching an ellipsis).  */

static rtx
function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
		 enum machine_mode orig_mode, const_tree type,
		 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
{
  static bool warnedsse, warnedmmx;

  /* Avoid the AL settings for the Unix64 ABI.  */
  if (mode == VOIDmode)
    return constm1_rtx;

  switch (mode)
    {
    default:
      break;

    case BLKmode:
      if (bytes < 0)
	break;
      /* FALLTHRU */
    case DImode: case SImode: case HImode: case QImode:
      if (words <= cum->nregs)
	{
	  int regno = cum->regno;

	  /* Fastcall allocates the first two DWORD (SImode) or
	     smaller arguments to ECX and EDX if it isn't an
	     aggregate type.  */
	  if (cum->fastcall)
	    {
	      if (mode == BLKmode
		  || mode == DImode
		  || (type && AGGREGATE_TYPE_P (type)))
		break;

	      /* ECX not EAX is the first allocated register.  */
	      if (regno == AX_REG)
		regno = CX_REG;
	    }
	  return gen_rtx_REG (mode, regno);
	}
      break;

    case DFmode:
      if (cum->float_in_sse < 2)
	break;
    case SFmode:
      if (cum->float_in_sse < 1)
	break;
      /* FALLTHRU */
    case TImode:
      /* In 32bit, we pass TImode in xmm registers.  */
    case V16QImode: case V8HImode: case V4SImode:
    case V2DImode: case V4SFmode: case V2DFmode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  if (!TARGET_SSE && !warnedsse && cum->warn_sse)
	    {
	      warnedsse = true;
	      warning (0, "SSE vector argument without SSE enabled "
		       "changes the ABI");
	    }
	  if (cum->sse_nregs)
	    return gen_reg_or_parallel (mode, orig_mode,
					cum->sse_regno + FIRST_SSE_REG);
	}
      break;

    case OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();

    case V8SFmode: case V8SImode: case V32QImode: case V16HImode:
    case V4DFmode: case V4DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  if (cum->sse_nregs)
	    return gen_reg_or_parallel (mode, orig_mode,
					cum->sse_regno + FIRST_SSE_REG);
	}
      break;

    case V8QImode: case V4HImode: case V2SImode: case V2SFmode:
    case V1TImode: case V1DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
	    {
	      warnedmmx = true;
	      warning (0, "MMX vector argument without MMX enabled "
		       "changes the ABI");
	    }
	  if (cum->mmx_nregs)
	    return gen_reg_or_parallel (mode, orig_mode,
					cum->mmx_regno + FIRST_MMX_REG);
	}
      break;
    }

  return NULL_RTX;
}
static rtx
function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
		 enum machine_mode orig_mode, const_tree type, bool named)
{
  /* Handle a hidden AL argument containing number of registers
     for varargs x86-64 functions.  */
  if (mode == VOIDmode)
    return GEN_INT (cum->maybe_vaarg
		    ? (cum->sse_nregs < 0
		       ? X86_64_SSE_REGPARM_MAX
		       : cum->sse_regno)
		    : -1);

  switch (mode)
    {
    default:
      break;

    case V8SFmode: case V8SImode: case V32QImode: case V16HImode:
    case V4DFmode: case V4DImode:
      /* Unnamed 256bit vector mode parameters are passed on stack.  */
      if (!named)
	return NULL;
      break;
    }

  return construct_container (mode, orig_mode, type, 0, cum->nregs,
			      cum->sse_nregs,
			      &x86_64_int_parameter_registers[cum->regno],
			      cum->sse_regno);
}
static rtx
function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
		    enum machine_mode orig_mode, bool named,
		    HOST_WIDE_INT bytes)
{
  unsigned int regno;

  /* We need to add clobber for MS_ABI->SYSV ABI calls in expand_call.
     We use value of -2 to specify that current function call is MSABI.  */
  if (mode == VOIDmode)
    return GEN_INT (-2);

  /* If we've run out of registers, it goes on the stack.  */
  if (cum->nregs == 0)
    return NULL_RTX;

  regno = x86_64_ms_abi_int_parameter_registers[cum->regno];

  /* Only floating point modes are passed in anything but integer regs.  */
  if (TARGET_SSE && (mode == SFmode || mode == DFmode))
    {
      if (named)
	regno = cum->regno + FIRST_SSE_REG;
      else
	{
	  rtx t1, t2;

	  /* Unnamed floating parameters are passed in both the
	     SSE and integer registers.  */
	  t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
	  t2 = gen_rtx_REG (mode, regno);
	  t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
	  t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
	}
    }
  /* Handle aggregated types passed in register.  */
  if (orig_mode == BLKmode)
    {
      if (bytes > 0 && bytes <= 8)
	mode = (bytes > 4 ? DImode : SImode);
      if (mode == BLKmode)
	mode = DImode;
    }

  return gen_reg_or_parallel (mode, orig_mode, regno);
}
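
/* For illustration only: the MS ABI assigns each of the first four
   arguments a fixed slot regardless of type.  Given

       void f (int a, double b, int c, double d);

   A goes in %rcx, B in %xmm1, C in %r8d and D in %xmm3; the integer
   slots corresponding to B and D (%rdx and %r9) are skipped rather
   than reused.  */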
/* Return where to put the arguments to a function.
   Return zero to push the argument on the stack, or a hard register in
   which to store the argument.

   MODE is the argument's machine mode.  TYPE is the data type of the
   argument.  It is null for libcalls where that information may not be
   available.  CUM gives information about the preceding args and about
   the function being called.  NAMED is nonzero if this argument is a
   named parameter (otherwise it is an extra parameter matching an
   ellipsis).  */

static rtx
ix86_function_arg (cumulative_args_t cum_v, enum machine_mode omode,
		   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum machine_mode mode = omode;
  HOST_WIDE_INT bytes, words;
  rtx arg;

  if (mode == BLKmode)
    bytes = int_size_in_bytes (type);
  else
    bytes = GET_MODE_SIZE (mode);
  words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* To simplify the code below, represent vector types with a vector mode
     even if MMX/SSE are not active.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    mode = type_natural_mode (type, cum);

  if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
    arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
  else if (TARGET_64BIT)
    arg = function_arg_64 (cum, mode, omode, type, named);
  else
    arg = function_arg_32 (cum, mode, omode, type, bytes, words);

  return arg;
}
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.  */

static bool
ix86_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
			enum machine_mode mode ATTRIBUTE_UNUSED,
			const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  /* See Windows x64 Software Convention.  */
  if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
    {
      int msize = (int) GET_MODE_SIZE (mode);
      if (type)
	{
	  /* Arrays are passed by reference.  */
	  if (TREE_CODE (type) == ARRAY_TYPE)
	    return true;

	  if (AGGREGATE_TYPE_P (type))
	    {
	      /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
		 are passed by reference.  */
	      msize = int_size_in_bytes (type);
	    }
	}

      /* __m128 is passed by reference.  */
      switch (msize)
	{
	case 1: case 2: case 4: case 8:
	  break;
	default:
	  return true;
	}
    }
  else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
    return true;

  return false;
}
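
/* For illustration only: under the Windows x64 convention sketched
   above, a 3-byte struct and a 16-byte __m128 are both passed by
   reference, while an 8-byte struct travels directly in a register or
   stack slot; only objects of size exactly 1, 2, 4 or 8 bytes are
   passed by value.  */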
/* Return true when TYPE should be 128bit aligned for 32bit argument
   passing ABI.  XXX: This function is obsolete and is only used for
   checking psABI compatibility with previous versions of GCC.  */

static bool
ix86_compat_aligned_value_p (const_tree type)
{
  enum machine_mode mode = TYPE_MODE (type);
  if (((TARGET_SSE && SSE_REG_MODE_P (mode))
       || mode == TDmode
       || mode == TFmode
       || mode == TCmode)
      && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
    return true;
  if (TYPE_ALIGN (type) < 128)
    return false;

  if (AGGREGATE_TYPE_P (type))
    {
      /* Walk the aggregates recursively.  */
      switch (TREE_CODE (type))
	{
	case RECORD_TYPE:
	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	  {
	    tree field;

	    /* Walk all the structure fields.  */
	    for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	      {
		if (TREE_CODE (field) == FIELD_DECL
		    && ix86_compat_aligned_value_p (TREE_TYPE (field)))
		  return true;
	      }
	    break;
	  }

	case ARRAY_TYPE:
	  /* Just for use if some language passes arrays by value.  */
	  if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
	    return true;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  return false;
}
/* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
   XXX: This function is obsolete and is only used for checking psABI
   compatibility with previous versions of GCC.  */

static unsigned int
ix86_compat_function_arg_boundary (enum machine_mode mode,
				   const_tree type, unsigned int align)
{
  /* In 32bit, only _Decimal128 and __float128 are aligned to their
     natural boundaries.  */
  if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
    {
      /* i386 ABI defines all arguments to be 4 byte aligned.  We have to
	 make an exception for SSE modes since these require 128bit
	 alignment.

	 The handling here differs from field_alignment.  ICC aligns MMX
	 arguments to 4 byte boundaries, while structure fields are aligned
	 to 8 byte boundaries.  */
      if (!type)
	{
	  if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
	    align = PARM_BOUNDARY;
	}
      else
	{
	  if (!ix86_compat_aligned_value_p (type))
	    align = PARM_BOUNDARY;
	}
    }
  if (align > BIGGEST_ALIGNMENT)
    align = BIGGEST_ALIGNMENT;
  return align;
}
/* Return true when TYPE should be 128bit aligned for 32bit argument
   passing ABI.  */

static bool
ix86_contains_aligned_value_p (const_tree type)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (mode == XFmode || mode == XCmode)
    return false;

  if (TYPE_ALIGN (type) < 128)
    return false;

  if (AGGREGATE_TYPE_P (type))
    {
      /* Walk the aggregates recursively.  */
      switch (TREE_CODE (type))
	{
	case RECORD_TYPE:
	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	  {
	    tree field;

	    /* Walk all the structure fields.  */
	    for (field = TYPE_FIELDS (type);
		 field;
		 field = DECL_CHAIN (field))
	      {
		if (TREE_CODE (field) == FIELD_DECL
		    && ix86_contains_aligned_value_p (TREE_TYPE (field)))
		  return true;
	      }
	    break;
	  }

	case ARRAY_TYPE:
	  /* Just for use if some language passes arrays by value.  */
	  if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
	    return true;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    return TYPE_ALIGN (type) >= 128;

  return false;
}
/* Gives the alignment boundary, in bits, of an argument with the
   specified mode and type.  */

static unsigned int
ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  unsigned int align;
  if (type)
    {
      /* Since the main variant type is used for call, we convert it to
	 the main variant type.  */
      type = TYPE_MAIN_VARIANT (type);
      align = TYPE_ALIGN (type);
    }
  else
    align = GET_MODE_ALIGNMENT (mode);
  if (align < PARM_BOUNDARY)
    align = PARM_BOUNDARY;
  else
    {
      static bool warned;
      unsigned int saved_align = align;

      if (!TARGET_64BIT)
	{
	  /* i386 ABI defines XFmode arguments to be 4 byte aligned.  */
	  if (!type)
	    {
	      if (mode == XFmode || mode == XCmode)
		align = PARM_BOUNDARY;
	    }
	  else if (!ix86_contains_aligned_value_p (type))
	    align = PARM_BOUNDARY;

	  if (align < 128)
	    align = PARM_BOUNDARY;
	}

      if (warn_psabi
	  && !warned
	  && align != ix86_compat_function_arg_boundary (mode, type,
							 saved_align))
	{
	  warned = true;
	  inform (input_location,
		  "The ABI for passing parameters with %d-byte"
		  " alignment has changed in GCC 4.6",
		  align / BITS_PER_UNIT);
	}
    }

  return align;
}
/* Return true if N is a possible register number of function value.  */

static bool
ix86_function_value_regno_p (const unsigned int regno)
{
  switch (regno)
    {
    case AX_REG:
      return true;

    case FIRST_FLOAT_REG:
      /* TODO: The function should depend on current function ABI but
	 builtins.c would need updating then.  Therefore we use the
	 default ABI.  */
      if (TARGET_64BIT && ix86_abi == MS_ABI)
	return false;
      return TARGET_FLOAT_RETURNS_IN_80387;

    case FIRST_SSE_REG:
      return TARGET_SSE;

    case FIRST_MMX_REG:
      if (TARGET_MACHO || TARGET_64BIT)
	return false;
      return TARGET_MMX;
    }

  return false;
}
/* Define how to find the value returned by a function.
   VALTYPE is the data type of the value (as a tree).
   If the precise function being called is known, FUNC is its FUNCTION_DECL;
   otherwise, FUNC is 0.  */

static rtx
function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
		   const_tree fntype, const_tree fn)
{
  unsigned int regno;

  /* 8-byte vector modes in %mm0.  See ix86_return_in_memory for where
     we normally prevent this case when mmx is not available.  However
     some ABIs may require the result to be returned like DImode.  */
  if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
    regno = FIRST_MMX_REG;

  /* 16-byte vector modes in %xmm0.  See ix86_return_in_memory for where
     we prevent this case when sse is not available.  However some ABIs
     may require the result to be returned like integer TImode.  */
  else if (mode == TImode
	   || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
    regno = FIRST_SSE_REG;

  /* 32-byte vector modes in %ymm0.  */
  else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
    regno = FIRST_SSE_REG;

  /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387).  */
  else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
    regno = FIRST_FLOAT_REG;
  else
    /* Most things go in %eax.  */
    regno = AX_REG;

  /* Override FP return register with %xmm0 for local functions when
     SSE math is enabled or for functions with sseregparm attribute.  */
  if ((fn || fntype) && (mode == SFmode || mode == DFmode))
    {
      int sse_level = ix86_function_sseregparm (fntype, fn, false);
      if ((sse_level >= 1 && mode == SFmode)
	  || (sse_level == 2 && mode == DFmode))
	regno = FIRST_SSE_REG;
    }

  /* OImode shouldn't be used directly.  */
  gcc_assert (mode != OImode);

  return gen_rtx_REG (orig_mode, regno);
}
static rtx
function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
		   const_tree valtype)
{
  rtx ret;

  /* Handle libcalls, which don't provide a type node.  */
  if (valtype == NULL)
    {
      unsigned int regno;

      switch (mode)
	{
	case SFmode: case SCmode: case DFmode: case DCmode:
	case TFmode: case SDmode: case DDmode: case TDmode:
	  regno = FIRST_SSE_REG;
	  break;
	case XFmode: case XCmode:
	  regno = FIRST_FLOAT_REG;
	  break;
	case TCmode:
	  return NULL;
	default:
	  regno = AX_REG;
	}

      return gen_rtx_REG (mode, regno);
    }
  else if (POINTER_TYPE_P (valtype))
    {
      /* Pointers are always returned in word_mode.  */
      mode = word_mode;
    }

  ret = construct_container (mode, orig_mode, valtype, 1,
			     X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
			     x86_64_int_return_registers, 0);

  /* For zero sized structures, construct_container returns NULL, but we
     need to keep rest of compiler happy by returning meaningful value.  */
  if (!ret)
    ret = gen_rtx_REG (orig_mode, AX_REG);

  return ret;
}
static rtx
function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
{
  unsigned int regno = AX_REG;

  if (TARGET_SSE)
    {
      switch (GET_MODE_SIZE (mode))
	{
	case 16:
	  if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
	      && !COMPLEX_MODE_P (mode))
	    regno = FIRST_SSE_REG;
	  break;
	case 8:
	case 4:
	  if (mode == SFmode || mode == DFmode)
	    regno = FIRST_SSE_REG;
	  break;
	default:
	  break;
	}
    }
  return gen_rtx_REG (orig_mode, regno);
}
static rtx
ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
		       enum machine_mode orig_mode, enum machine_mode mode)
{
  const_tree fn, fntype;

  fn = NULL_TREE;
  if (fntype_or_decl && DECL_P (fntype_or_decl))
    fn = fntype_or_decl;
  fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;

  if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
    return function_value_ms_64 (orig_mode, mode);
  else if (TARGET_64BIT)
    return function_value_64 (orig_mode, mode, valtype);
  else
    return function_value_32 (orig_mode, mode, fntype, fn);
}

static rtx
ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
		     bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode, orig_mode;

  orig_mode = TYPE_MODE (valtype);
  mode = type_natural_mode (valtype, NULL);
  return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
}
/* Pointer function arguments and return values are promoted to
   word_mode.  */

static enum machine_mode
ix86_promote_function_mode (const_tree type, enum machine_mode mode,
			    int *punsignedp, const_tree fntype,
			    int for_return)
{
  if (type != NULL_TREE && POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return word_mode;
    }
  return default_promote_function_mode (type, mode, punsignedp, fntype,
					for_return);
}
/* Return true if a structure, union or array with MODE containing FIELD
   should be accessed using BLKmode.  */

static bool
ix86_member_type_forces_blk (const_tree field, enum machine_mode mode)
{
  /* Union with XFmode must be in BLKmode.  */
  return (mode == XFmode
	  && (TREE_CODE (DECL_FIELD_CONTEXT (field)) == UNION_TYPE
	      || TREE_CODE (DECL_FIELD_CONTEXT (field)) == QUAL_UNION_TYPE));
}

rtx
ix86_libcall_value (enum machine_mode mode)
{
  return ix86_function_value_1 (NULL, NULL, mode, mode);
}
/* Return true iff type is returned in memory.  */

static bool ATTRIBUTE_UNUSED
return_in_memory_32 (const_tree type, enum machine_mode mode)
{
  HOST_WIDE_INT size;

  if (mode == BLKmode)
    return true;

  size = int_size_in_bytes (type);

  if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
    return false;

  if (VECTOR_MODE_P (mode) || mode == TImode)
    {
      /* User-created vectors small enough to fit in EAX.  */
      if (size < 8)
	return false;

      /* MMX/3dNow values are returned in MM0,
	 except when it doesn't exist or the ABI prescribes otherwise.  */
      if (size == 8)
	return !TARGET_MMX || TARGET_VECT8_RETURNS;

      /* SSE values are returned in XMM0, except when it doesn't exist.  */
      if (size == 16)
	return !TARGET_SSE;

      /* AVX values are returned in YMM0, except when it doesn't exist.  */
      if (size == 32)
	return !TARGET_AVX;
    }

  if (mode == XFmode)
    return false;

  if (size > 12)
    return true;

  /* OImode shouldn't be used directly.  */
  gcc_assert (mode != OImode);

  return false;
}
static bool ATTRIBUTE_UNUSED
return_in_memory_64 (const_tree type, enum machine_mode mode)
{
  int needed_intregs, needed_sseregs;
  return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
}
static bool ATTRIBUTE_UNUSED
return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
{
  HOST_WIDE_INT size = int_size_in_bytes (type);

  /* __m128 is returned in xmm0.  */
  if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
      && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
    return false;

  /* Otherwise, the size must be exactly in [1248].  */
  return size != 1 && size != 2 && size != 4 && size != 8;
}
static bool
ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
#ifdef SUBTARGET_RETURN_IN_MEMORY
  return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
#else
  const enum machine_mode mode = type_natural_mode (type, NULL);

  if (TARGET_64BIT)
    {
      if (ix86_function_type_abi (fntype) == MS_ABI)
	return return_in_memory_ms_64 (type, mode);
      else
	return return_in_memory_64 (type, mode);
    }
  else
    return return_in_memory_32 (type, mode);
#endif
}
/* When returning SSE vector types, we have a choice of either
     (1) being abi incompatible with a -march switch, or
     (2) generating an error.
   Given no good solution, I think the safest thing is one warning.
   The user won't be able to use -Werror, but....

   Choose the STRUCT_VALUE_RTX hook because that's (at present) only
   called in response to actually generating a caller or callee that
   uses such a type.  As opposed to TARGET_RETURN_IN_MEMORY, which is called
   via aggregate_value_p for general type probing from tree-ssa.  */

static rtx
ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
{
  static bool warnedsse, warnedmmx;

  if (!TARGET_64BIT && type)
    {
      /* Look at the return type of the function, not the function type.  */
      enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));

      if (!TARGET_SSE && !warnedsse)
	{
	  if (mode == TImode
	      || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
	    {
	      warnedsse = true;
	      warning (0, "SSE vector return without SSE enabled "
		       "changes the ABI");
	    }
	}

      if (!TARGET_MMX && !warnedmmx)
	{
	  if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
	    {
	      warnedmmx = true;
	      warning (0, "MMX vector return without MMX enabled "
		       "changes the ABI");
	    }
	}
    }

  return NULL;
}
/* Create the va_list data type.  */

/* Returns the calling convention specific va_list data type.
   The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI.  */

static tree
ix86_build_builtin_va_list_abi (enum calling_abi abi)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;

  /* For i386 we use plain pointer to argument area.  */
  if (!TARGET_64BIT || abi == MS_ABI)
    return build_pointer_type (char_type_node);

  record = lang_hooks.types.make_type (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
			  TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("gp_offset"),
		      unsigned_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("fp_offset"),
		      unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
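
/* For illustration only: on 64-bit SYSV targets the record built
   above corresponds to the well-known C layout

       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } va_list[1];
*/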
/* Setup the builtin va_list data type and for 64-bit the additional
   calling convention specific va_list data types.  */

static tree
ix86_build_builtin_va_list (void)
{
  tree ret = ix86_build_builtin_va_list_abi (ix86_abi);

  /* Initialize abi specific va_list builtin types.  */
  if (TARGET_64BIT)
    {
      tree t;
      if (ix86_abi == MS_ABI)
	{
	  t = ix86_build_builtin_va_list_abi (SYSV_ABI);
	  if (TREE_CODE (t) != RECORD_TYPE)
	    t = build_variant_type_copy (t);
	  sysv_va_list_type_node = t;
	}
      else
	{
	  t = ret;
	  if (TREE_CODE (t) != RECORD_TYPE)
	    t = build_variant_type_copy (t);
	  sysv_va_list_type_node = t;
	}
      if (ix86_abi != MS_ABI)
	{
	  t = ix86_build_builtin_va_list_abi (MS_ABI);
	  if (TREE_CODE (t) != RECORD_TYPE)
	    t = build_variant_type_copy (t);
	  ms_va_list_type_node = t;
	}
      else
	{
	  t = ret;
	  if (TREE_CODE (t) != RECORD_TYPE)
	    t = build_variant_type_copy (t);
	  ms_va_list_type_node = t;
	}
    }

  return ret;
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.  */

static void
setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
{
  rtx save_area, mem;
  alias_set_type set;
  int i, max;

  /* GPR size of varargs save area.  */
  if (cfun->va_list_gpr_size)
    ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
  else
    ix86_varargs_gpr_size = 0;

  /* FPR size of varargs save area.  We don't need it if we don't pass
     anything in SSE registers.  */
  if (TARGET_SSE && cfun->va_list_fpr_size)
    ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
  else
    ix86_varargs_fpr_size = 0;

  if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
    return;

  save_area = frame_pointer_rtx;
  set = get_varargs_alias_set ();

  max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
  if (max > X86_64_REGPARM_MAX)
    max = X86_64_REGPARM_MAX;

  for (i = cum->regno; i < max; i++)
    {
      mem = gen_rtx_MEM (word_mode,
			 plus_constant (Pmode, save_area, i * UNITS_PER_WORD));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      emit_move_insn (mem,
		      gen_rtx_REG (word_mode,
				   x86_64_int_parameter_registers[i]));
    }

  if (ix86_varargs_fpr_size)
    {
      enum machine_mode smode;
      rtx label, test;

      /* Now emit code to save SSE registers.  The AX parameter contains number
	 of SSE parameter registers used to call this function, though all we
	 actually check here is the zero/non-zero status.  */

      label = gen_label_rtx ();
      test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
      emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
				      label));

      /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
	 we used movdqa (i.e. TImode) instead?  Perhaps even better would
	 be if we could determine the real mode of the data, via a hook
	 into pass_stdarg.  Ignore all that for now.  */
      smode = V4SFmode;
      if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
	crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);

      max = cum->sse_regno + cfun->va_list_fpr_size / 16;
      if (max > X86_64_SSE_REGPARM_MAX)
	max = X86_64_SSE_REGPARM_MAX;

      for (i = cum->sse_regno; i < max; ++i)
	{
	  mem = plus_constant (Pmode, save_area,
			       i * 16 + ix86_varargs_gpr_size);
	  mem = gen_rtx_MEM (smode, mem);
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (smode));

	  emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
	}

      emit_label (label);
    }
}
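
/* The save area laid down above, as later consumed by va_arg (a sketch;
   sizes are per the SysV x86-64 ABI):

       reg_save_area +   0:  rdi rsi rdx rcx r8 r9     (6 x 8 bytes of GPRs)
       reg_save_area +  48:  xmm0 ... xmm7             (8 x 16 bytes of SSE regs)

   so gp_offset ranges over 0..48 and fp_offset over 48..176, matching the
   X86_64_REGPARM_MAX and X86_64_SSE_REGPARM_MAX bounds used here.  */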
static void
setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
{
  alias_set_type set = get_varargs_alias_set ();
  int i;

  /* Reset to zero, as there might be a sysv vaarg used
     before.  */
  ix86_varargs_gpr_size = 0;
  ix86_varargs_fpr_size = 0;

  for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
    {
      rtx reg, mem;

      mem = gen_rtx_MEM (Pmode,
			 plus_constant (Pmode, virtual_incoming_args_rtx,
					i * UNITS_PER_WORD));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);

      reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
      emit_move_insn (mem, reg);
    }
}
static void
ix86_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
			     tree type, int *pretend_size ATTRIBUTE_UNUSED,
			     int no_rtl)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  CUMULATIVE_ARGS next_cum;
  tree fntype;

  /* This argument doesn't appear to be used anymore.  Which is good,
     because the old code here didn't suppress rtl generation.  */
  gcc_assert (!no_rtl);

  if (!TARGET_64BIT)
    return;

  fntype = TREE_TYPE (current_function_decl);

  /* For varargs, we do not want to skip the dummy va_dcl argument.
     For stdargs, we do want to skip the last named argument.  */
  next_cum = *cum;
  if (stdarg_p (fntype))
    ix86_function_arg_advance (pack_cumulative_args (&next_cum), mode, type,
			       true);

  if (cum->call_abi == MS_ABI)
    setup_incoming_varargs_ms_64 (&next_cum);
  else
    setup_incoming_varargs_64 (&next_cum);
}
/* Checks if TYPE is of kind va_list char *.  */

static bool
is_va_list_char_pointer (tree type)
{
  tree canonic;

  /* For 32-bit it is always true.  */
  if (!TARGET_64BIT)
    return true;
  canonic = ix86_canonical_va_list_type (type);
  return (canonic == ms_va_list_type_node
	  || (ix86_abi == MS_ABI && canonic == va_list_type_node));
}
/* Implement va_start.  */

static void
ix86_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;
  tree type;
  rtx ovf_rtx;

  if (flag_split_stack
      && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    {
      unsigned int scratch_regno;

      /* When we are splitting the stack, we can't refer to the stack
	 arguments using internal_arg_pointer, because they may be on
	 the old stack.  The split stack prologue will arrange to
	 leave a pointer to the old stack arguments in a scratch
	 register, which we here copy to a pseudo-register.  The split
	 stack prologue can't set the pseudo-register directly because
	 it (the prologue) runs before any registers have been saved.  */

      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno != INVALID_REGNUM)
	{
	  rtx reg, seq;

	  reg = gen_reg_rtx (Pmode);
	  cfun->machine->split_stack_varargs_pointer = reg;

	  start_sequence ();
	  emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
	  seq = get_insns ();
	  end_sequence ();

	  push_topmost_sequence ();
	  emit_insn_after (seq, entry_of_function ());
	  pop_topmost_sequence ();
	}
    }

  /* Only 64bit target needs something special.  */
  if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
    {
      if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
	std_expand_builtin_va_start (valist, nextarg);
      else
	{
	  rtx va_r, next;

	  va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  next = expand_binop (ptr_mode, add_optab,
			       cfun->machine->split_stack_varargs_pointer,
			       crtl->args.arg_offset_rtx,
			       NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  convert_move (va_r, next, 0);
	}
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
  /* The following should be folded into the MEM_REF offset.  */
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
		f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = crtl->args.info.regno;
  n_fpr = crtl->args.info.sse_regno;

  if (cfun->va_list_gpr_size)
    {
      type = TREE_TYPE (gpr);
      t = build2 (MODIFY_EXPR, type,
		  gpr, build_int_cst (type, n_gpr * 8));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (TARGET_SSE && cfun->va_list_fpr_size)
    {
      type = TREE_TYPE (fpr);
      t = build2 (MODIFY_EXPR, type, fpr,
		  build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  type = TREE_TYPE (ovf);
  if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    ovf_rtx = crtl->args.internal_arg_pointer;
  else
    ovf_rtx = cfun->machine->split_stack_varargs_pointer;
  t = make_tree (type, ovf_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, type, ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
    {
      /* Find the register save area.
	 The prologue of the function saves it right above the stack frame.  */
      type = TREE_TYPE (sav);
      t = make_tree (type, frame_pointer_rtx);
      if (!ix86_varargs_gpr_size)
	t = fold_build_pointer_plus_hwi (t, -8 * X86_64_REGPARM_MAX);
      t = build2 (MODIFY_EXPR, type, sav, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
/* Implement va_arg.  */

static tree
ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
		      gimple_seq *post_p)
{
  static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;
  int size, rsize;
  tree lab_false, lab_over = NULL_TREE;
  tree addr, t2;
  rtx container;
  int indirect_p = 0;
  tree ptrtype;
  enum machine_mode nat_mode;
  unsigned int arg_boundary;

  /* Only 64bit target needs something special.  */
  if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
		build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
  valist = build_va_arg_indirect_ref (valist);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect_p)
    type = build_pointer_type (type);
  size = int_size_in_bytes (type);
  rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  nat_mode = type_natural_mode (type, NULL);
  switch (nat_mode)
    {
    case V8SFmode:
    case V8SImode:
    case V32QImode:
    case V16HImode:
    case V4DFmode:
    case V4DImode:
      /* Unnamed 256bit vector mode parameters are passed on stack.  */
      if (!TARGET_64BIT_MS_ABI)
	{
	  container = NULL;
	  break;
	}

    default:
      container = construct_container (nat_mode, TYPE_MODE (type),
				       type, 0, X86_64_REGPARM_MAX,
				       X86_64_SSE_REGPARM_MAX, intreg,
				       0);
      break;
    }

  /* Pull the value out of the saved registers.  */

  addr = create_tmp_var (ptr_type_node, "addr");

  if (container)
    {
      int needed_intregs, needed_sseregs;
      bool need_temp;
      tree int_addr, sse_addr;

      lab_false = create_artificial_label (UNKNOWN_LOCATION);
      lab_over = create_artificial_label (UNKNOWN_LOCATION);

      examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);

      need_temp = (!REG_P (container)
		   && ((needed_intregs && TYPE_ALIGN (type) > 64)
		       || TYPE_ALIGN (type) > 128));

      /* In case we are passing a structure, verify that it is a consecutive
	 block in the register save area.  If not, we need to do moves.  */
      if (!need_temp && !REG_P (container))
	{
	  /* Verify that all registers are strictly consecutive.  */
	  if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
	    {
	      int i;

	      for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
		{
		  rtx slot = XVECEXP (container, 0, i);
		  if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
		      || INTVAL (XEXP (slot, 1)) != i * 16)
		    need_temp = true;
		}
	    }
	  else
	    {
	      int i;

	      for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
		{
		  rtx slot = XVECEXP (container, 0, i);
		  if (REGNO (XEXP (slot, 0)) != (unsigned int) i
		      || INTVAL (XEXP (slot, 1)) != i * 8)
		    need_temp = true;
		}
	    }
	}
      if (!need_temp)
	{
	  int_addr = addr;
	  sse_addr = addr;
	}
      else
	{
	  int_addr = create_tmp_var (ptr_type_node, "int_addr");
	  sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
	}

      /* First ensure that we fit completely in registers.  */
      if (needed_intregs)
	{
	  t = build_int_cst (TREE_TYPE (gpr),
			     (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
	  t = build2 (GE_EXPR, boolean_type_node, gpr, t);
	  t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
	  t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
	  gimplify_and_add (t, pre_p);
	}
      if (needed_sseregs)
	{
	  t = build_int_cst (TREE_TYPE (fpr),
			     (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
			     + X86_64_REGPARM_MAX * 8);
	  t = build2 (GE_EXPR, boolean_type_node, fpr, t);
	  t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
	  t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
	  gimplify_and_add (t, pre_p);
	}

      /* Compute index to start of area used for integer regs.  */
      if (needed_intregs)
	{
	  /* int_addr = gpr + sav; */
	  t = fold_build_pointer_plus (sav, gpr);
	  gimplify_assign (int_addr, t, pre_p);
	}
      if (needed_sseregs)
	{
	  /* sse_addr = fpr + sav; */
	  t = fold_build_pointer_plus (sav, fpr);
	  gimplify_assign (sse_addr, t, pre_p);
	}
      if (need_temp)
	{
	  int i, prev_size = 0;
	  tree temp = create_tmp_var (type, "va_arg_tmp");

	  /* addr = &temp; */
	  t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
	  gimplify_assign (addr, t, pre_p);

	  for (i = 0; i < XVECLEN (container, 0); i++)
	    {
	      rtx slot = XVECEXP (container, 0, i);
	      rtx reg = XEXP (slot, 0);
	      enum machine_mode mode = GET_MODE (reg);
	      tree piece_type;
	      tree addr_type;
	      tree daddr_type;
	      tree src_addr, src;
	      int src_offset;
	      tree dest_addr, dest;
	      int cur_size = GET_MODE_SIZE (mode);

	      gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
	      prev_size = INTVAL (XEXP (slot, 1));
	      if (prev_size + cur_size > size)
		{
		  cur_size = size - prev_size;
		  mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
		  if (mode == BLKmode)
		    mode = QImode;
		}
	      piece_type = lang_hooks.types.type_for_mode (mode, 1);
	      if (mode == GET_MODE (reg))
		addr_type = build_pointer_type (piece_type);
	      else
		addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
							 true);
	      daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
							true);

	      if (SSE_REGNO_P (REGNO (reg)))
		{
		  src_addr = sse_addr;
		  src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
		}
	      else
		{
		  src_addr = int_addr;
		  src_offset = REGNO (reg) * 8;
		}
	      src_addr = fold_convert (addr_type, src_addr);
	      src_addr = fold_build_pointer_plus_hwi (src_addr, src_offset);

	      dest_addr = fold_convert (daddr_type, addr);
	      dest_addr = fold_build_pointer_plus_hwi (dest_addr, prev_size);
	      if (cur_size == GET_MODE_SIZE (mode))
		{
		  src = build_va_arg_indirect_ref (src_addr);
		  dest = build_va_arg_indirect_ref (dest_addr);

		  gimplify_assign (dest, src, pre_p);
		}
	      else
		{
		  tree copy
		    = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				       3, dest_addr, src_addr,
				       size_int (cur_size));
		  gimplify_and_add (copy, pre_p);
		}
	      prev_size += cur_size;
	    }
	}

      if (needed_intregs)
	{
	  t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
		      build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
	  gimplify_assign (gpr, t, pre_p);
	}

      if (needed_sseregs)
	{
	  t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
		      build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
	  gimplify_assign (fpr, t, pre_p);
	}

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
    }

  /* ... otherwise out of the overflow area.  */

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
  if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  /* Care for on-stack alignment if needed.  */
  if (arg_boundary <= 64 || size == 0)
    t = ovf;
  else
    {
      HOST_WIDE_INT align = arg_boundary / 8;
      t = fold_build_pointer_plus_hwi (ovf, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
  gimplify_assign (addr, t, pre_p);

  t = fold_build_pointer_plus_hwi (t, rsize * UNITS_PER_WORD);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (container)
    gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));

  ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  addr = fold_convert (ptrtype, addr);

  if (indirect_p)
    addr = build_va_arg_indirect_ref (addr);
  return build_va_arg_indirect_ref (addr);
}
/* Return true if OPNUM's MEM should be matched
   in movabs* patterns.  */

bool
ix86_check_movabs (rtx insn, int opnum)
{
  rtx set, mem;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = XEXP (set, opnum);
  while (GET_CODE (mem) == SUBREG)
    mem = SUBREG_REG (mem);
  gcc_assert (MEM_P (mem));
  return volatile_ok || !MEM_VOLATILE_P (mem);
}
/* Initialize the table of extra 80387 mathematical constants.  */

static void
init_ext_80387_constants (void)
{
  static const char * cst[5] =
  {
    "0.3010299956639811952256464283594894482",  /* 0: fldlg2  */
    "0.6931471805599453094286904741849753009",  /* 1: fldln2  */
    "1.4426950408889634073876517827983434472",  /* 2: fldl2e  */
    "3.3219280948873623478083405569094566090",  /* 3: fldl2t  */
    "3.1415926535897932385128089594061862044",  /* 4: fldpi  */
  };
  int i;

  for (i = 0; i < 5; i++)
    {
      real_from_string (&ext_80387_constants_table[i], cst[i]);
      /* Ensure each constant is rounded to XFmode precision.  */
      real_convert (&ext_80387_constants_table[i],
		    XFmode, &ext_80387_constants_table[i]);
    }

  ext_80387_constants_init = 1;
}
/* Return non-zero if the constant is something that
   can be loaded with a special instruction.  */

int
standard_80387_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);

  REAL_VALUE_TYPE r;

  if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
    return -1;

  if (x == CONST0_RTX (mode))
    return 1;
  if (x == CONST1_RTX (mode))
    return 2;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* For XFmode constants, try to find a special 80387 instruction when
     optimizing for size or on those CPUs that benefit from them.  */
  if (mode == XFmode
      && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
    {
      int i;

      if (! ext_80387_constants_init)
	init_ext_80387_constants ();

      for (i = 0; i < 5; i++)
	if (real_identical (&r, &ext_80387_constants_table[i]))
	  return i + 3;
    }

  /* Load of the constant -0.0 or -1.0 will be split as
     fldz;fchs or fld1;fchs sequence.  */
  if (real_isnegzero (&r))
    return 8;
  if (real_identical (&r, &dconstm1))
    return 9;

  return 0;
}
/* Return the opcode of the special instruction to be used to load
   the constant X.  */

const char *
standard_80387_constant_opcode (rtx x)
{
  switch (standard_80387_constant_p (x))
    {
    case 1:
      return "fldz";
    case 2:
      return "fld1";
    case 3:
      return "fldlg2";
    case 4:
      return "fldln2";
    case 5:
      return "fldl2e";
    case 6:
      return "fldl2t";
    case 7:
      return "fldpi";
    case 8:
    case 9:
      return "#";
    default:
      gcc_unreachable ();
    }
}
/* Return the CONST_DOUBLE representing the 80387 constant that is
   loaded by the specified special instruction.  The argument IDX
   matches the return value from standard_80387_constant_p.  */

rtx
standard_80387_constant_rtx (int idx)
{
  int i;

  if (! ext_80387_constants_init)
    init_ext_80387_constants ();

  switch (idx)
    {
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
      i = idx - 3;
      break;

    default:
      gcc_unreachable ();
    }

  return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
				       XFmode);
}
/* Return 1 if X is all 0s and 2 if X is all 1s
   in supported SSE/AVX vector mode.  */

int
standard_sse_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);

  if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
    return 1;
  if (vector_all_ones_operand (x, mode))
    switch (mode)
      {
      case V16QImode:
      case V8HImode:
      case V4SImode:
      case V2DImode:
	if (TARGET_SSE2)
	  return 2;
      case V32QImode:
      case V16HImode:
      case V8SImode:
      case V4DImode:
	if (TARGET_AVX2)
	  return 2;
      default:
	break;
      }

  return 0;
}
/* Return the opcode of the special instruction to be used to load
   a known constant X - currently `0' or `-1'.  */

const char *
standard_sse_constant_opcode (rtx insn, rtx x)
{
  switch (standard_sse_constant_p (x))
    {
    case 1:
      switch (get_attr_mode (insn))
	{
	case MODE_TI:
	  return "%vpxor\t%0, %d0";
	case MODE_V2DF:
	  return "%vxorpd\t%0, %d0";
	case MODE_V4SF:
	  return "%vxorps\t%0, %d0";

	case MODE_OI:
	  return "vpxor\t%x0, %x0, %x0";
	case MODE_V4DF:
	  return "vxorpd\t%x0, %x0, %x0";
	case MODE_V8SF:
	  return "vxorps\t%x0, %x0, %x0";

	default:
	  break;
	}

    case 2:
      if (TARGET_AVX)
	return "vpcmpeqd\t%0, %0, %0";
      else
	return "pcmpeqd\t%0, %0";

    default:
      break;
    }
  gcc_unreachable ();
}
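
/* The chosen idioms are the standard dependency-breaking ones: xoring a
   register with itself is recognized by the hardware as a zeroing idiom,
   and pcmpeqd of a register with itself produces all-ones lanes.  The
   intrinsic-level equivalents (illustrative only):  */
#if 0
#include <emmintrin.h>
__m128i sse_zero (void)     { return _mm_setzero_si128 (); }  /* pxor %xmm0, %xmm0 */
__m128i sse_all_ones (void) { return _mm_set1_epi32 (-1); }   /* pcmpeqd %xmm0, %xmm0 */
#endif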
/* Returns true if OP contains a symbol reference.  */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return true;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}
/* Return true if it is appropriate to emit `ret' instructions in the
   body of a function.  Do this only if the epilogue is simple, needing a
   couple of insns.  Prior to reloading, we can't tell how many registers
   must be saved, so return false then.  Return false if there is no frame
   marker to de-allocate.  */

bool
ix86_can_use_return_insn_p (void)
{
  struct ix86_frame frame;

  if (! reload_completed || frame_pointer_needed)
    return 0;

  /* Don't allow more than 32k pop, since that's all we can do
     with one instruction.  */
  if (crtl->args.pops_args && crtl->args.size >= 32768)
    return 0;

  ix86_compute_frame_layout (&frame);
  return (frame.stack_pointer_offset == UNITS_PER_WORD
	  && (frame.nregs + frame.nsseregs) == 0);
}
/* Value should be nonzero if functions must have frame pointers.
   Zero means the frame pointer need not be set up (and parms may
   be accessed via the stack pointer) in functions that seem suitable.  */

static bool
ix86_frame_pointer_required (void)
{
  /* If we accessed previous frames, then the generated code expects
     to be able to access the saved ebp value in our frame.  */
  if (cfun->machine->accesses_prev_frame)
    return true;

  /* Several x86 os'es need a frame pointer for other reasons,
     usually pertaining to setjmp.  */
  if (SUBTARGET_FRAME_POINTER_REQUIRED)
    return true;

  /* For older 32-bit runtimes setjmp requires valid frame-pointer.  */
  if (TARGET_32BIT_MS_ABI && cfun->calls_setjmp)
    return true;

  /* Win64 SEH, very large frames need a frame-pointer as maximum stack
     allocation is 4GB.  */
  if (TARGET_64BIT_MS_ABI && get_frame_size () > SEH_MAX_FRAME_SIZE)
    return true;

  /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
     turns off the frame pointer by default.  Turn it back on now if
     we've not got a leaf function.  */
  if (TARGET_OMIT_LEAF_FRAME_POINTER
      && (!crtl->is_leaf
	  || ix86_current_function_calls_tls_descriptor))
    return true;

  if (crtl->profile && !flag_fentry)
    return true;

  return false;
}
/* Record that the current function accesses previous call frames.  */

void
ix86_setup_frame_addresses (void)
{
  cfun->machine->accesses_prev_frame = 1;
}
#ifndef USE_HIDDEN_LINKONCE
# if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
#  define USE_HIDDEN_LINKONCE 1
# else
#  define USE_HIDDEN_LINKONCE 0
# endif
#endif

static int pic_labels_used;

/* Fills in the label name that should be used for a pc thunk for
   the given register.  */

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  gcc_assert (!TARGET_64BIT);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__x86.get_pc_thunk.%s", reg_names[regno]);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
}
/* This function generates code for -fpic that loads %ebx with
   the return address of the caller and then returns.  */

static void
ix86_code_end (void)
{
  rtx xops[2];
  int regno;

  for (regno = AX_REG; regno <= SP_REG; regno++)
    {
      char name[32];
      tree decl;

      if (!(pic_labels_used & (1 << regno)))
	continue;

      get_pc_thunk_name (name, regno);

      decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			 get_identifier (name),
			 build_function_type_list (void_type_node, NULL_TREE));
      DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				       NULL_TREE, void_type_node);
      TREE_PUBLIC (decl) = 1;
      TREE_STATIC (decl) = 1;
      DECL_IGNORED_P (decl) = 1;

#if TARGET_MACHO
      if (TARGET_MACHO)
	{
	  switch_to_section (darwin_sections[text_coal_section]);
	  fputs ("\t.weak_definition\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  fputs ("\n\t.private_extern\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  putc ('\n', asm_out_file);
	  ASM_OUTPUT_LABEL (asm_out_file, name);
	  DECL_WEAK (decl) = 1;
	}
      else
#endif
      if (USE_HIDDEN_LINKONCE)
	{
	  DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);

	  targetm.asm_out.unique_section (decl, 0);
	  switch_to_section (get_named_section (decl, NULL, 0));

	  targetm.asm_out.globalize_label (asm_out_file, name);
	  fputs ("\t.hidden\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  putc ('\n', asm_out_file);
	  ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
	}
      else
	{
	  switch_to_section (text_section);
	  ASM_OUTPUT_LABEL (asm_out_file, name);
	}

      DECL_INITIAL (decl) = make_node (BLOCK);
      current_function_decl = decl;
      init_function_start (decl);
      first_function_block_is_cold = false;
      /* Make sure unwind info is emitted for the thunk if needed.  */
      final_start_function (emit_barrier (), asm_out_file, 1);

      /* Pad stack IP move with 4 instructions (two NOPs count
	 as one instruction).  */
      if (TARGET_PAD_SHORT_FUNCTION)
	{
	  int i = 8;

	  while (i--)
	    fputs ("\tnop\n", asm_out_file);
	}

      xops[0] = gen_rtx_REG (Pmode, regno);
      xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
      output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
      fputs ("\tret\n", asm_out_file);
      final_end_function ();
      init_insn_lengths ();
      free_after_compilation (cfun);
      set_cfun (NULL);
      current_function_decl = NULL;
    }

  if (flag_split_stack)
    file_end_indicate_split_stack ();
}
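
/* The thunk emitted above for, e.g., %ebx reads (AT&T syntax, sketch):

       __x86.get_pc_thunk.bx:
               movl    (%esp), %ebx    # return address == caller's PC
               ret

   so a "call __x86.get_pc_thunk.bx" leaves the address of the following
   insn in %ebx, from which output_set_got below derives the GOT base.  */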
/* Emit code for the SET_GOT patterns.  */

const char *
output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
{
  rtx xops[3];

  xops[0] = dest;

  if (TARGET_VXWORKS_RTP && flag_pic)
    {
      /* Load (*VXWORKS_GOTT_BASE) into the PIC register.  */
      xops[2] = gen_rtx_MEM (Pmode,
			     gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
      output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);

      /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
	 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
	 an unadorned address.  */
      xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
      SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
      output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
      return "";
    }

  xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);

  if (!flag_pic)
    {
      xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());

      output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);

#if TARGET_MACHO
      /* Output the Mach-O "canonical" label name ("Lxx$pb") here too.  This
	 is what will be referenced by the Mach-O PIC subsystem.  */
      if (!label)
	ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
#endif

      targetm.asm_out.internal_label (asm_out_file, "L",
				      CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
    }
  else
    {
      char name[32];
      get_pc_thunk_name (name, REGNO (dest));
      pic_labels_used |= 1 << REGNO (dest);

      xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      xops[2] = gen_rtx_MEM (QImode, xops[2]);
      output_asm_insn ("call\t%X2", xops);
      /* Output the Mach-O "canonical" label name ("Lxx$pb") here too.  This
	 is what will be referenced by the Mach-O PIC subsystem.  */
#if TARGET_MACHO
      if (!label)
	ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
      else
	targetm.asm_out.internal_label (asm_out_file, "L",
					CODE_LABEL_NUMBER (label));
#endif
    }

  if (!TARGET_MACHO)
    output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);

  return "";
}
/* Generate a "push" pattern for input ARG.  */

static rtx
gen_push (rtx arg)
{
  struct machine_function *m = cfun->machine;

  if (m->fs.cfa_reg == stack_pointer_rtx)
    m->fs.cfa_offset += UNITS_PER_WORD;
  m->fs.sp_offset += UNITS_PER_WORD;

  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (VOIDmode,
		      gen_rtx_MEM (word_mode,
				   gen_rtx_PRE_DEC (Pmode,
						    stack_pointer_rtx)),
		      arg);
}

/* Generate a "pop" pattern for input ARG.  */

static rtx
gen_pop (rtx arg)
{
  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (VOIDmode,
		      arg,
		      gen_rtx_MEM (word_mode,
				   gen_rtx_POST_INC (Pmode,
						     stack_pointer_rtx)));
}
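
/* The RTL produced, e.g. when pushing and popping %rbp in 64-bit mode
   (a sketch of the patterns, not literal dump output):

       (set (mem:DI (pre_dec:DI (reg:DI sp))) (reg:DI bp))    ;; gen_push
       (set (reg:DI bp) (mem:DI (post_inc:DI (reg:DI sp))))   ;; gen_pop  */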
/* Return >= 0 if there is an unused call-clobbered register available
   for the entire function.  */

static unsigned int
ix86_select_alt_pic_regnum (void)
{
  if (crtl->is_leaf
      && !crtl->profile
      && !ix86_current_function_calls_tls_descriptor)
    {
      int i, drap;
      /* Can't use the same register for both PIC and DRAP.  */
      if (crtl->drap_reg)
	drap = REGNO (crtl->drap_reg);
      else
	drap = -1;
      for (i = 2; i >= 0; --i)
	if (i != drap && !df_regs_ever_live_p (i))
	  return i;
    }

  return INVALID_REGNUM;
}
/* Return TRUE if we need to save REGNO.  */

static bool
ix86_save_reg (unsigned int regno, bool maybe_eh_return)
{
  if (pic_offset_table_rtx
      && regno == REAL_PIC_OFFSET_TABLE_REGNUM
      && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
	  || crtl->profile
	  || crtl->calls_eh_return
	  || crtl->uses_const_pool))
    return ix86_select_alt_pic_regnum () == INVALID_REGNUM;

  if (crtl->calls_eh_return && maybe_eh_return)
    {
      unsigned i;
      for (i = 0; ; i++)
	{
	  unsigned test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
    return true;

  return (df_regs_ever_live_p (regno)
	  && !call_used_regs[regno]
	  && !fixed_regs[regno]
	  && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
}
/* Return number of saved general purpose registers.  */

static int
ix86_nsaved_regs (void)
{
  int nregs = 0;
  int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      nregs ++;
  return nregs;
}

/* Return number of saved SSE registers.  */

static int
ix86_nsaved_sseregs (void)
{
  int nregs = 0;
  int regno;

  if (!TARGET_64BIT_MS_ABI)
    return 0;
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      nregs ++;
  return nregs;
}
/* Given FROM and TO register numbers, say whether this elimination is
   allowed.  If stack alignment is needed, we can only replace argument
   pointer with hard frame pointer, or replace frame pointer with stack
   pointer.  Otherwise, frame pointer elimination is automatically
   handled and all other eliminations are valid.  */

static bool
ix86_can_eliminate (const int from, const int to)
{
  if (stack_realign_fp)
    return ((from == ARG_POINTER_REGNUM
	     && to == HARD_FRAME_POINTER_REGNUM)
	    || (from == FRAME_POINTER_REGNUM
		&& to == STACK_POINTER_REGNUM));
  else
    return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
}
/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

HOST_WIDE_INT
ix86_initial_elimination_offset (int from, int to)
{
  struct ix86_frame frame;
  ix86_compute_frame_layout (&frame);

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset;
  else if (from == FRAME_POINTER_REGNUM
	   && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
  else
    {
      gcc_assert (to == STACK_POINTER_REGNUM);

      if (from == ARG_POINTER_REGNUM)
	return frame.stack_pointer_offset;

      gcc_assert (from == FRAME_POINTER_REGNUM);
      return frame.stack_pointer_offset - frame.frame_pointer_offset;
    }
}
/* In a dynamically-aligned function, we can't know the offset from
   stack pointer to frame pointer, so we must ensure that setjmp
   eliminates fp against the hard fp (%ebp) rather than trying to
   index from %esp up to the top of the frame across a gap that is
   of unknown (at compile-time) size.  */
static rtx
ix86_builtin_setjmp_frame_value (void)
{
  return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
}

/* When using -fsplit-stack, the allocation routines set a field in
   the TCB to the bottom of the stack plus this much space, measured
   in bytes.  */

#define SPLIT_STACK_AVAILABLE 256
/* Fill structure ix86_frame about frame of currently computed function.  */

static void
ix86_compute_frame_layout (struct ix86_frame *frame)
{
  unsigned HOST_WIDE_INT stack_alignment_needed;
  HOST_WIDE_INT offset;
  unsigned HOST_WIDE_INT preferred_alignment;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT to_allocate;

  frame->nregs = ix86_nsaved_regs ();
  frame->nsseregs = ix86_nsaved_sseregs ();

  stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
  preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;

  /* The 64-bit MS ABI seems to require stack alignment to always be 16,
     except for function prologues and leaf functions.  */
  if ((TARGET_64BIT_MS_ABI && preferred_alignment < 16)
      && (!crtl->is_leaf || cfun->calls_alloca != 0
	  || ix86_current_function_calls_tls_descriptor))
    {
      preferred_alignment = 16;
      stack_alignment_needed = 16;
      crtl->preferred_stack_boundary = 128;
      crtl->stack_alignment_needed = 128;
    }

  gcc_assert (!size || stack_alignment_needed);
  gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
  gcc_assert (preferred_alignment <= stack_alignment_needed);

  /* For SEH we have to limit the amount of code movement into the prologue.
     At present we do this via a BLOCKAGE, at which point there's very little
     scheduling that can be done, which means that there's very little point
     in doing anything except PUSHs.  */
  if (TARGET_SEH)
    cfun->machine->use_fast_prologue_epilogue = false;

  /* During reload iteration the amount of registers saved can change.
     Recompute the value as needed.  Do not recompute when amount of registers
     didn't change as reload does multiple calls to the function and does not
     expect the decision to change within single iteration.  */
  else if (!optimize_function_for_size_p (cfun)
	   && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
    {
      int count = frame->nregs;
      struct cgraph_node *node = cgraph_get_node (current_function_decl);

      cfun->machine->use_fast_prologue_epilogue_nregs = count;

      /* The fast prologue uses move instead of push to save registers.  This
	 is significantly longer, but also executes faster as modern hardware
	 can execute the moves in parallel, but can't do that for push/pop.

	 Be careful about choosing what prologue to emit:  When function takes
	 many instructions to execute we may use slow version as well as in
	 case function is known to be outside hot spot (this is known with
	 feedback only).  Weight the size of function by number of registers
	 to save as it is cheap to use one or two push instructions but very
	 slow to use many of them.  */
      if (count)
	count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
      if (node->frequency < NODE_FREQUENCY_NORMAL
	  || (flag_branch_probabilities
	      && node->frequency < NODE_FREQUENCY_HOT))
	cfun->machine->use_fast_prologue_epilogue = false;
      else
	cfun->machine->use_fast_prologue_epilogue
	  = !expensive_function_p (count);
    }

  frame->save_regs_using_mov
    = (TARGET_PROLOGUE_USING_MOVE && cfun->machine->use_fast_prologue_epilogue
       /* If static stack checking is enabled and done with probes,
	  the registers need to be saved before allocating the frame.  */
       && flag_stack_check != STATIC_BUILTIN_STACK_CHECK);

  /* Skip return address.  */
  offset = UNITS_PER_WORD;

  /* Skip pushed static chain.  */
  if (ix86_static_chain_on_stack)
    offset += UNITS_PER_WORD;

  /* Skip saved base pointer.  */
  if (frame_pointer_needed)
    offset += UNITS_PER_WORD;
  frame->hfp_save_offset = offset;

  /* The traditional frame pointer location is at the top of the frame.  */
  frame->hard_frame_pointer_offset = offset;

  /* Register save area */
  offset += frame->nregs * UNITS_PER_WORD;
  frame->reg_save_offset = offset;

  /* On SEH target, registers are pushed just before the frame pointer
     location.  */
  if (TARGET_SEH)
    frame->hard_frame_pointer_offset = offset;

  /* Align and set SSE register save area.  */
  if (frame->nsseregs)
    {
      /* The only ABI that has saved SSE registers (Win64) also has a
	 16-byte aligned default stack, and thus we don't need to be
	 within the re-aligned local stack frame to save them.  */
      gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
      offset = (offset + 16 - 1) & -16;
      offset += frame->nsseregs * 16;
    }
  frame->sse_reg_save_offset = offset;

  /* The re-aligned stack starts here.  Values before this point are not
     directly comparable with values below this point.  In order to make
     sure that no value happens to be the same before and after, force
     the alignment computation below to add a non-zero value.  */
  if (stack_realign_fp)
    offset = (offset + stack_alignment_needed) & -stack_alignment_needed;

  /* Va-arg area */
  frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
  offset += frame->va_arg_size;

  /* Align start of frame for local function.  */
  if (stack_realign_fp
      || offset != frame->sse_reg_save_offset
      || size != 0
      || !crtl->is_leaf
      || cfun->calls_alloca
      || ix86_current_function_calls_tls_descriptor)
    offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;

  /* Frame pointer points here.  */
  frame->frame_pointer_offset = offset;

  offset += size;

  /* Add outgoing arguments area.  Can be skipped if we eliminated
     all the function calls as dead code.
     Skipping is however impossible when function calls alloca.  Alloca
     expander assumes that last crtl->outgoing_args_size
     of stack frame are unused.  */
  if (ACCUMULATE_OUTGOING_ARGS
      && (!crtl->is_leaf || cfun->calls_alloca
	  || ix86_current_function_calls_tls_descriptor))
    {
      offset += crtl->outgoing_args_size;
      frame->outgoing_arguments_size = crtl->outgoing_args_size;
    }
  else
    frame->outgoing_arguments_size = 0;

  /* Align stack boundary.  Only needed if we're calling another function
     or using alloca.  */
  if (!crtl->is_leaf || cfun->calls_alloca
      || ix86_current_function_calls_tls_descriptor)
    offset = (offset + preferred_alignment - 1) & -preferred_alignment;

  /* We've reached end of stack frame.  */
  frame->stack_pointer_offset = offset;

  /* Size prologue needs to allocate.  */
  to_allocate = offset - frame->sse_reg_save_offset;

  if ((!to_allocate && frame->nregs <= 1)
      || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
    frame->save_regs_using_mov = false;

  if (ix86_using_red_zone ()
      && crtl->sp_is_unchanging
      && crtl->is_leaf
      && !ix86_current_function_calls_tls_descriptor)
    {
      frame->red_zone_size = to_allocate;
      if (frame->save_regs_using_mov)
	frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
      if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
	frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
    }
  else
    frame->red_zone_size = 0;
  frame->stack_pointer_offset -= frame->red_zone_size;

  /* The SEH frame pointer location is near the bottom of the frame.
     This is enforced by the fact that the difference between the
     stack pointer and the frame pointer is limited to 240 bytes in
     the unwind data structure.  */
  if (TARGET_SEH)
    {
      HOST_WIDE_INT diff;

      /* If we can leave the frame pointer where it is, do so.  Also, returns
	 the establisher frame for __builtin_frame_address (0).  */
      diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
      if (diff <= SEH_MAX_FRAME_SIZE
	  && (diff > 240 || (diff & 15) != 0)
	  && !crtl->accesses_prior_frames)
	{
	  /* Ideally we'd determine what portion of the local stack frame
	     (within the constraint of the lowest 240) is most heavily used.
	     But without that complication, simply bias the frame pointer
	     by 128 bytes so as to maximize the amount of the local stack
	     frame that is addressable with 8-bit offsets.  */
	  frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
	}
    }
}
/* This is semi-inlined memory_address_length, but simplified
   since we know that we're always dealing with reg+offset, and
   to avoid having to create and discard all that rtl.  */

static inline int
choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
{
  int len = 4;

  if (offset == 0)
    {
      /* EBP and R13 cannot be encoded without an offset.  */
      len = (regno == BP_REG || regno == R13_REG);
    }
  else if (IN_RANGE (offset, -128, 127))
    len = 1;

  /* ESP and R12 must be encoded with a SIB byte.  */
  if (regno == SP_REG || regno == R12_REG)
    len++;

  return len;
}
/* Return an RTX that points to CFA_OFFSET within the stack frame.
   The valid base registers are taken from CFUN->MACHINE->FS.  */

static rtx
choose_baseaddr (HOST_WIDE_INT cfa_offset)
{
  const struct machine_function *m = cfun->machine;
  rtx base_reg = NULL;
  HOST_WIDE_INT base_offset = 0;

  if (m->use_fast_prologue_epilogue)
    {
      /* Choose the base register most likely to allow the most scheduling
	 opportunities.  Generally FP is valid throughout the function,
	 while DRAP must be reloaded within the epilogue.  But choose either
	 over the SP due to increased encoding size.  */

      if (m->fs.fp_valid)
	{
	  base_reg = hard_frame_pointer_rtx;
	  base_offset = m->fs.fp_offset - cfa_offset;
	}
      else if (m->fs.drap_valid)
	{
	  base_reg = crtl->drap_reg;
	  base_offset = 0 - cfa_offset;
	}
      else if (m->fs.sp_valid)
	{
	  base_reg = stack_pointer_rtx;
	  base_offset = m->fs.sp_offset - cfa_offset;
	}
    }
  else
    {
      HOST_WIDE_INT toffset;
      int len = 16, tlen;

      /* Choose the base register with the smallest address encoding.
	 With a tie, choose FP > DRAP > SP.  */
      if (m->fs.sp_valid)
	{
	  base_reg = stack_pointer_rtx;
	  base_offset = m->fs.sp_offset - cfa_offset;
	  len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
	}
      if (m->fs.drap_valid)
	{
	  toffset = 0 - cfa_offset;
	  tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
	  if (tlen <= len)
	    {
	      base_reg = crtl->drap_reg;
	      base_offset = toffset;
	      len = tlen;
	    }
	}
      if (m->fs.fp_valid)
	{
	  toffset = m->fs.fp_offset - cfa_offset;
	  tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
	  if (tlen <= len)
	    {
	      base_reg = hard_frame_pointer_rtx;
	      base_offset = toffset;
	      len = tlen;
	    }
	}
    }
  gcc_assert (base_reg != NULL);

  return plus_constant (Pmode, base_reg, base_offset);
}
/* Emit code to save registers in the prologue.  */

static void
ix86_emit_save_regs (void)
{
  unsigned int regno;
  rtx insn;

  for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	insn = emit_insn (gen_push (gen_rtx_REG (word_mode, regno)));
	RTX_FRAME_RELATED_P (insn) = 1;
      }
}
/* Emit a single register save at CFA - CFA_OFFSET.  */

static void
ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
			      HOST_WIDE_INT cfa_offset)
{
  struct machine_function *m = cfun->machine;
  rtx reg = gen_rtx_REG (mode, regno);
  rtx mem, addr, base, insn;

  addr = choose_baseaddr (cfa_offset);
  mem = gen_frame_mem (mode, addr);

  /* For SSE saves, we need to indicate the 128-bit alignment.  */
  set_mem_align (mem, GET_MODE_ALIGNMENT (mode));

  insn = emit_move_insn (mem, reg);
  RTX_FRAME_RELATED_P (insn) = 1;

  base = addr;
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  gcc_checking_assert (REG_P (base));

  /* When saving registers into a re-aligned local stack frame, avoid
     any tricky guessing by dwarf2out.  */
  if (m->fs.realigned)
    {
      gcc_checking_assert (stack_realign_drap);

      if (regno == REGNO (crtl->drap_reg))
	{
	  /* A bit of a hack.  We force the DRAP register to be saved in
	     the re-aligned stack frame, which provides us with a copy
	     of the CFA that will last past the prologue.  Install it.  */
	  gcc_checking_assert (cfun->machine->fs.fp_valid);
	  addr = plus_constant (Pmode, hard_frame_pointer_rtx,
				cfun->machine->fs.fp_offset - cfa_offset);
	  mem = gen_rtx_MEM (mode, addr);
	  add_reg_note (insn, REG_CFA_DEF_CFA, mem);
	}
      else
	{
	  /* The frame pointer is a stable reference within the
	     aligned frame.  Use it.  */
	  gcc_checking_assert (cfun->machine->fs.fp_valid);
	  addr = plus_constant (Pmode, hard_frame_pointer_rtx,
				cfun->machine->fs.fp_offset - cfa_offset);
	  mem = gen_rtx_MEM (mode, addr);
	  add_reg_note (insn, REG_CFA_EXPRESSION,
			gen_rtx_SET (VOIDmode, mem, reg));
	}
    }

  /* The memory may not be relative to the current CFA register,
     which means that we may need to generate a new pattern for
     use by the unwind info.  */
  else if (base != m->fs.cfa_reg)
    {
      addr = plus_constant (Pmode, m->fs.cfa_reg,
			    m->fs.cfa_offset - cfa_offset);
      mem = gen_rtx_MEM (mode, addr);
      add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
    }
}
/* Emit code to save registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */
static void
ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	ix86_emit_save_reg_using_mov (word_mode, regno, cfa_offset);
	cfa_offset -= UNITS_PER_WORD;
      }
}

/* Emit code to save SSE registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */
static void
ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
	cfa_offset -= 16;
      }
}
static GTY(()) rtx queued_cfa_restores;

/* Add a REG_CFA_RESTORE REG note to INSN or queue them until next stack
   manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
   Don't add the note if the previously saved value will be left untouched
   within stack red-zone till return, as unwinders can find the same value
   in the register and on the stack.  */

static void
ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
{
  if (!crtl->shrink_wrapped
      && cfa_offset <= cfun->machine->fs.red_zone_offset)
    return;

  if (insn)
    {
      add_reg_note (insn, REG_CFA_RESTORE, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    queued_cfa_restores
      = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
}

/* Add queued REG_CFA_RESTORE notes if any to INSN.  */

static void
ix86_add_queued_cfa_restore_notes (rtx insn)
{
  rtx last;
  if (!queued_cfa_restores)
    return;
  for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
    ;
  XEXP (last, 1) = REG_NOTES (insn);
  REG_NOTES (insn) = queued_cfa_restores;
  queued_cfa_restores = NULL_RTX;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Expand prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory accesses.
   STYLE should be negative if instructions should be marked as frame related,
   zero if %r11 register is live and cannot be freely used and positive
   otherwise.  */

static void
pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
			   int style, bool set_cfa)
{
  struct machine_function *m = cfun->machine;
  rtx insn;
  bool add_frame_related_expr = false;

  if (Pmode == SImode)
    insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
  else if (x86_64_immediate_operand (offset, DImode))
    insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
  else
    {
      rtx tmp;
      /* r11 is used by indirect sibcall return as well, set before the
	 epilogue and used after the epilogue.  */
      if (style)
	tmp = gen_rtx_REG (DImode, R11_REG);
      else
	{
	  gcc_assert (src != hard_frame_pointer_rtx
		      && dest != hard_frame_pointer_rtx);
	  tmp = hard_frame_pointer_rtx;
	}
      insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
      if (style < 0)
	add_frame_related_expr = true;

      insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
    }

  insn = emit_insn (insn);
  if (style >= 0)
    ix86_add_queued_cfa_restore_notes (insn);

  if (set_cfa)
    {
      rtx r;

      gcc_assert (m->fs.cfa_reg == src);
      m->fs.cfa_offset += INTVAL (offset);
      m->fs.cfa_reg = dest;

      r = gen_rtx_PLUS (Pmode, src, offset);
      r = gen_rtx_SET (VOIDmode, dest, r);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (style < 0)
    {
      RTX_FRAME_RELATED_P (insn) = 1;
      if (add_frame_related_expr)
	{
	  rtx r = gen_rtx_PLUS (Pmode, src, offset);
	  r = gen_rtx_SET (VOIDmode, dest, r);
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
	}
    }

  if (dest == stack_pointer_rtx)
    {
      HOST_WIDE_INT ooffset = m->fs.sp_offset;
      bool valid = m->fs.sp_valid;

      if (src == hard_frame_pointer_rtx)
	{
	  valid = m->fs.fp_valid;
	  ooffset = m->fs.fp_offset;
	}
      else if (src == crtl->drap_reg)
	{
	  valid = m->fs.drap_valid;
	  ooffset = 0;
	}
      else
	{
	  /* Else there are two possibilities: SP itself, which we set
	     up as the default above.  Or EH_RETURN_STACKADJ_RTX, which is
	     taken care of by hand along the eh_return path.  */
	  gcc_checking_assert (src == stack_pointer_rtx
			       || offset == const0_rtx);
	}

      m->fs.sp_offset = ooffset - INTVAL (offset);
      m->fs.sp_valid = valid;
    }
}
/* Find an available register to be used as dynamic realign argument
   pointer register.  Such a register will be written in prologue and
   used in begin of body, so it must not be
	1. parameter passing register.
	2. GOT pointer.
   We reuse the static-chain register if it is available.  Otherwise, we
   use DI for i386 and R13 for x86-64.  We chose R13 since it has
   shorter encoding.

   Return: the regno of chosen register.  */

static unsigned int
find_drap_reg (void)
{
  tree decl = cfun->decl;

  if (TARGET_64BIT)
    {
      /* Use R13 for nested functions or functions that need a static chain.
	 Since a function with a tail call may use any caller-saved
	 registers in the epilogue, DRAP must not use a caller-saved
	 register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return R13_REG;

      return R10_REG;
    }
  else
    {
      /* Use DI for nested functions or functions that need a static chain.
	 Since a function with a tail call may use any caller-saved
	 registers in the epilogue, DRAP must not use a caller-saved
	 register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return DI_REG;

      /* Reuse static chain register if it isn't used for parameter
	 passing.  */
      if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2)
	{
	  unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (decl));
	  if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) == 0)
	    return CX_REG;
	}
      return DI_REG;
    }
}
/* Return minimum incoming stack alignment.  */

static unsigned int
ix86_minimum_incoming_stack_boundary (bool sibcall)
{
  unsigned int incoming_stack_boundary;

  /* Prefer the one specified at command line.  */
  if (ix86_user_incoming_stack_boundary)
    incoming_stack_boundary = ix86_user_incoming_stack_boundary;
  /* In 32bit, use MIN_STACK_BOUNDARY for incoming stack boundary
     if -mstackrealign is used, it isn't used for sibcall check and
     estimated stack alignment is 128bit.  */
  else if (!sibcall
	   && !TARGET_64BIT
	   && ix86_force_align_arg_pointer
	   && crtl->stack_alignment_estimated == 128)
    incoming_stack_boundary = MIN_STACK_BOUNDARY;
  else
    incoming_stack_boundary = ix86_default_incoming_stack_boundary;

  /* Incoming stack alignment can be changed on individual functions
     via force_align_arg_pointer attribute.  We use the smallest
     incoming stack boundary.  */
  if (incoming_stack_boundary > MIN_STACK_BOUNDARY
      && lookup_attribute (ix86_force_align_arg_pointer_string,
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    incoming_stack_boundary = MIN_STACK_BOUNDARY;

  /* The incoming stack frame has to be aligned at least at
     parm_stack_boundary.  */
  if (incoming_stack_boundary < crtl->parm_stack_boundary)
    incoming_stack_boundary = crtl->parm_stack_boundary;

  /* Stack at entrance of main is aligned by runtime.  We use the
     smallest incoming stack boundary.  */
  if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
      && DECL_NAME (current_function_decl)
      && MAIN_NAME_P (DECL_NAME (current_function_decl))
      && DECL_FILE_SCOPE_P (current_function_decl))
    incoming_stack_boundary = MAIN_STACK_BOUNDARY;

  return incoming_stack_boundary;
}
/* Update incoming stack boundary and estimated stack alignment.  */

static void
ix86_update_stack_boundary (void)
{
  ix86_incoming_stack_boundary
    = ix86_minimum_incoming_stack_boundary (false);

  /* x86_64 vararg needs 16byte stack alignment for register save
     area.  */
  if (TARGET_64BIT
      && cfun->stdarg
      && crtl->stack_alignment_estimated < 128)
    crtl->stack_alignment_estimated = 128;
}
/* Handle the TARGET_GET_DRAP_RTX hook.  Return NULL if no DRAP is
   needed or an rtx for DRAP otherwise.  */

static rtx
ix86_get_drap_rtx (void)
{
  if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
    crtl->need_drap = true;

  if (stack_realign_drap)
    {
      /* Assign DRAP to vDRAP and returns vDRAP */
      unsigned int regno = find_drap_reg ();
      rtx drap_vreg;
      rtx arg_ptr;
      rtx seq, insn;

      arg_ptr = gen_rtx_REG (Pmode, regno);
      crtl->drap_reg = arg_ptr;

      start_sequence ();
      drap_vreg = copy_to_reg (arg_ptr);
      seq = get_insns ();
      end_sequence ();

      insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
      if (!optimize)
	{
	  add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return drap_vreg;
    }
  else
    return NULL;
}
/* Handle the TARGET_INTERNAL_ARG_POINTER hook.  */

static rtx
ix86_internal_arg_pointer (void)
{
  return virtual_incoming_args_rtx;
}

struct scratch_reg {
  rtx reg;
  bool saved;
};
/* Return a short-lived scratch register for use on function entry.
   In 32-bit mode, it is valid only after the registers are saved
   in the prologue.  This register must be released by means of
   release_scratch_register_on_entry once it is dead.  */

static void
get_scratch_register_on_entry (struct scratch_reg *sr)
{
  int regno;

  sr->saved = false;

  if (TARGET_64BIT)
    {
      /* We always use R11 in 64-bit mode.  */
      regno = R11_REG;
    }
  else
    {
      tree decl = current_function_decl, fntype = TREE_TYPE (decl);
      bool fastcall_p
	= lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
      bool thiscall_p
	= lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
      bool static_chain_p = DECL_STATIC_CHAIN (decl);
      int regparm = ix86_function_regparm (fntype, decl);
      int drap_regno
	= crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;

      /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
	  for the static chain register.  */
      if ((regparm < 1 || (fastcall_p && !static_chain_p))
	  && drap_regno != AX_REG)
	regno = AX_REG;
      /* 'thiscall' sets regparm to 1, uses ecx for arguments and edx
	  for the static chain register.  */
      else if (thiscall_p && !static_chain_p && drap_regno != AX_REG)
	regno = AX_REG;
      else if (regparm < 2 && !thiscall_p && drap_regno != DX_REG)
	regno = DX_REG;
      /* ecx is the static chain register.  */
      else if (regparm < 3 && !fastcall_p && !thiscall_p
	       && !static_chain_p
	       && drap_regno != CX_REG)
	regno = CX_REG;
      else if (ix86_save_reg (BX_REG, true))
	regno = BX_REG;
      /* esi is the static chain register.  */
      else if (!(regparm == 3 && static_chain_p)
	       && ix86_save_reg (SI_REG, true))
	regno = SI_REG;
      else if (ix86_save_reg (DI_REG, true))
	regno = DI_REG;
      else
	{
	  regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
	  sr->saved = true;
	}
    }

  sr->reg = gen_rtx_REG (Pmode, regno);
  if (sr->saved)
    {
      rtx insn = emit_insn (gen_push (sr->reg));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Release a scratch register obtained from the preceding function.  */

static void
release_scratch_register_on_entry (struct scratch_reg *sr)
{
  if (sr->saved)
    {
      struct machine_function *m = cfun->machine;
      rtx x, insn = emit_insn (gen_pop (sr->reg));

      /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop.  */
      RTX_FRAME_RELATED_P (insn) = 1;
      x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
      x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
      m->fs.sp_offset -= UNITS_PER_WORD;
    }
}
9723 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
9725 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
9728 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size
)
9730 /* We skip the probe for the first interval + a small dope of 4 words and
9731 probe that many bytes past the specified size to maintain a protection
9732 area at the botton of the stack. */
9733 const int dope
= 4 * UNITS_PER_WORD
;
9734 rtx size_rtx
= GEN_INT (size
), last
;
9736 /* See if we have a constant small number of probes to generate. If so,
9737 that's the easy case. The run-time loop is made up of 11 insns in the
9738 generic case while the compile-time loop is made up of 3+2*(n-1) insns
9739 for n # of intervals. */
9740 if (size
<= 5 * PROBE_INTERVAL
)
9742 HOST_WIDE_INT i
, adjust
;
9743 bool first_probe
= true;
9745 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
9746 values of N from 1 until it exceeds SIZE. If only one probe is
9747 needed, this will not generate any code. Then adjust and probe
9748 to PROBE_INTERVAL + SIZE. */
9749 for (i
= PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
9753 adjust
= 2 * PROBE_INTERVAL
+ dope
;
9754 first_probe
= false;
9757 adjust
= PROBE_INTERVAL
;
9759 emit_insn (gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
9760 plus_constant (Pmode
, stack_pointer_rtx
,
9762 emit_stack_probe (stack_pointer_rtx
);
9766 adjust
= size
+ PROBE_INTERVAL
+ dope
;
9768 adjust
= size
+ PROBE_INTERVAL
- i
;
9770 emit_insn (gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
9771 plus_constant (Pmode
, stack_pointer_rtx
,
9773 emit_stack_probe (stack_pointer_rtx
);
9775 /* Adjust back to account for the additional first interval. */
9776 last
= emit_insn (gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
9777 plus_constant (Pmode
, stack_pointer_rtx
,
9778 PROBE_INTERVAL
+ dope
)));
9781 /* Otherwise, do the same as above, but in a loop. Note that we must be
9782 extra careful with variables wrapping around because we might be at
9783 the very top (or the very bottom) of the address space and we have
9784 to be able to handle this case properly; in particular, we use an
9785 equality test for the loop condition. */
  else
    {
      HOST_WIDE_INT rounded_size;
      struct scratch_reg sr;

      get_scratch_register_on_entry (&sr);


      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;


      /* Step 2: compute initial and final value of the loop counter.  */

      /* SP = SP_0 + PROBE_INTERVAL.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			      plus_constant (Pmode, stack_pointer_rtx,
					     - (PROBE_INTERVAL + dope))));

      /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE.  */
      emit_move_insn (sr.reg, GEN_INT (-rounded_size));
      emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
			      gen_rtx_PLUS (Pmode, sr.reg,
					    stack_pointer_rtx)));


      /* Step 3: the loop

	   while (SP != LAST_ADDR)
	     {
	       SP = SP + PROBE_INTERVAL
	       probe at SP
	     }

	 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
	 values of N from 1 until it is equal to ROUNDED_SIZE.  */

      emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));


      /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
	 assert at compile-time that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				  plus_constant (Pmode, stack_pointer_rtx,
						 rounded_size - size)));
	  emit_stack_probe (stack_pointer_rtx);
	}

      /* Adjust back to account for the additional first interval.  */
      last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				     plus_constant (Pmode, stack_pointer_rtx,
						    PROBE_INTERVAL + dope)));

      release_scratch_register_on_entry (&sr);
    }

  gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
  /* Even if the stack pointer isn't the CFA register, we need to correctly
     describe the adjustments made to it, in particular differentiate the
     frame-related ones from the frame-unrelated ones.  */
  if (size > 0)
    {
      rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
      XVECEXP (expr, 0, 0)
	= gen_rtx_SET (VOIDmode, stack_pointer_rtx,
		       plus_constant (Pmode, stack_pointer_rtx, -size));
      XVECEXP (expr, 0, 1)
	= gen_rtx_SET (VOIDmode, stack_pointer_rtx,
		       plus_constant (Pmode, stack_pointer_rtx,
				      PROBE_INTERVAL + dope + size));
      add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
      RTX_FRAME_RELATED_P (last) = 1;

      cfun->machine->fs.sp_offset += size;
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
/* Adjust the stack pointer up to REG while probing it.  */

const char *
output_adjust_stack_and_probe (rtx reg)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if SP == LAST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
  fputs ("\tje\t", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* SP = SP + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (PROBE_INTERVAL);
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at SP.  */
  xops[1] = const0_rtx;
  output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);

  fprintf (asm_out_file, "\tjmp\t");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
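/* (For reference, an illustrative 32-bit AT&T rendering of the loop
   emitted above, assuming PROBE_INTERVAL == 4096 and %eax holding
   LAST_ADDR:

       .LPSRL0: cmpl %eax, %esp
                je   .LPSRE0
                subl $4096, %esp
                orl  $0, (%esp)
                jmp  .LPSRL0
       .LPSRE0:

   The "orl $0" is the probe: it touches the freshly exposed page with
   a write access without changing its contents.)  */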
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  The run-time loop is made up of 7 insns in the
     generic case while the compile-time loop is made up of n insns for n #
     of intervals.  */
  if (size <= 7 * PROBE_INTERVAL)
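    /* (Illustrative arithmetic, added here: at this 7-interval cutoff
       the unrolled form costs exactly n = 7 probe insns, matching the
       7 insns of the generic run-time loop, so beyond that the loop
       wins on size.)  */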
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size, last;
      struct scratch_reg sr;

      get_scratch_register_on_entry (&sr);


      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;


      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_OFFSET = FIRST.  */
      emit_move_insn (sr.reg, GEN_INT (-first));

      /* LAST_OFFSET = FIRST + ROUNDED_SIZE.  */
      last = first + rounded_size;


      /* Step 3: the loop

	   while (TEST_ADDR != LAST_ADDR)
	     {
	       TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	       probe at TEST_ADDR
	     }

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));


      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode,
					 gen_rtx_PLUS (Pmode,
						       stack_pointer_rtx,
						       sr.reg),
					 rounded_size - size));

      release_scratch_register_on_entry (&sr);
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
/* Probe a range of stack addresses from REG to END, inclusive.  These are
   offsets from the current stack pointer.  */

const char *
output_probe_stack_range (rtx reg, rtx end)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[3];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg;
  xops[1] = end;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
  fputs ("\tje\t", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (PROBE_INTERVAL);
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at TEST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  xops[2] = const0_rtx;
  output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);

  fprintf (asm_out_file, "\tjmp\t");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
/* Finalize stack_realign_needed flag, which will guide prologue/epilogue
   to be generated in correct form.  */

static void
ix86_finalize_stack_realign_flags (void)
{
  /* Check if stack realign is really needed after reload, and
     store the result in cfun.  */
  unsigned int incoming_stack_boundary
    = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
       ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
  unsigned int stack_realign
    = (incoming_stack_boundary
       < (crtl->is_leaf
	  ? crtl->max_used_stack_slot_alignment
	  : crtl->stack_alignment_needed));

  if (crtl->stack_realign_finalized)
    {
      /* After stack_realign_needed is finalized, we can no longer
	 change it.  */
      gcc_assert (crtl->stack_realign_needed == stack_realign);
      return;
    }

  /* If the only reason for frame_pointer_needed is that we conservatively
     assumed stack realignment might be needed, but in the end nothing that
     needed the stack alignment had been spilled, clear frame_pointer_needed
     and say we don't need stack realignment.  */
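  /* (Concrete illustration, added here: a leaf function compiled with
     -fomit-frame-pointer that ends up spilling nothing with large
     alignment may reach this point with frame_pointer_needed set purely
     because realignment was assumed possible; the checks below detect
     that and drop both the frame pointer and the realignment.)  */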
  if (stack_realign
      && !crtl->need_drap
      && frame_pointer_needed
      && crtl->is_leaf
      && flag_omit_frame_pointer
      && crtl->sp_is_unchanging
      && !ix86_current_function_calls_tls_descriptor
      && !crtl->accesses_prior_frames
      && !cfun->calls_alloca
      && !crtl->calls_eh_return
      && !(flag_stack_check && STACK_CHECK_MOVING_SP)
      && !ix86_frame_pointer_required ()
      && get_frame_size () == 0
      && ix86_nsaved_sseregs () == 0
      && ix86_varargs_gpr_size + ix86_varargs_fpr_size == 0)
    {
      HARD_REG_SET set_up_by_prologue, prologue_used;
      basic_block bb;

      CLEAR_HARD_REG_SET (prologue_used);
      CLEAR_HARD_REG_SET (set_up_by_prologue);
      add_to_hard_reg_set (&set_up_by_prologue, Pmode, STACK_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue, Pmode, ARG_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue, Pmode,
			   HARD_FRAME_POINTER_REGNUM);
      FOR_EACH_BB (bb)
	{
	  rtx insn;
	  FOR_BB_INSNS (bb, insn)
	    if (NONDEBUG_INSN_P (insn)
		&& requires_stack_frame_p (insn, prologue_used,
					   set_up_by_prologue))
	      {
		crtl->stack_realign_needed = stack_realign;
		crtl->stack_realign_finalized = true;
		return;
	      }
	}

      frame_pointer_needed = false;
      stack_realign = false;
      crtl->max_used_stack_slot_alignment = incoming_stack_boundary;
      crtl->stack_alignment_needed = incoming_stack_boundary;
      crtl->stack_alignment_estimated = incoming_stack_boundary;
      if (crtl->preferred_stack_boundary > incoming_stack_boundary)
	crtl->preferred_stack_boundary = incoming_stack_boundary;
      df_finish_pass (true);
      df_scan_alloc (NULL);
      df_scan_blocks ();
      df_compute_regs_ever_live (true);
      df_analyze ();
    }

  crtl->stack_realign_needed = stack_realign;
  crtl->stack_realign_finalized = true;
}
/* Expand the prologue into a bunch of separate insns.  */

void
ix86_expand_prologue (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn, t;
  bool pic_reg_used;
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  bool int_registers_saved;
  bool sse_registers_saved;

  ix86_finalize_stack_realign_flags ();

  /* DRAP should not coexist with stack_realign_fp.  */
  gcc_assert (!(crtl->drap_reg && stack_realign_fp));

  memset (&m->fs, 0, sizeof (m->fs));

  /* Initialize CFA state for before the prologue.  */
  m->fs.cfa_reg = stack_pointer_rtx;
  m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;

  /* Track SP offset to the CFA.  We continue tracking this after we've
     swapped the CFA register away from SP.  In the case of re-alignment
     this is fudged; we're interested in offsets within the local frame.  */
  m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
  m->fs.sp_valid = true;

  ix86_compute_frame_layout (&frame);

  if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
    {
      /* We should have already generated an error for any use of
	 ms_hook on a nested function.  */
      gcc_checking_assert (!ix86_static_chain_on_stack);

      /* Check if profiling is active and we shall use profiling before
	 prologue variant.  If so sorry.  */
      if (crtl->profile && flag_fentry != 0)
	sorry ("ms_hook_prologue attribute isn%'t compatible "
	       "with -mfentry for 32-bit");

      /* In ix86_asm_output_function_label we emitted:
	 8b ff     movl.s %edi,%edi
	 55        push   %ebp
	 8b ec     movl.s %esp,%ebp

	 This matches the hookable function prologue in Win32 API
	 functions in Microsoft Windows XP Service Pack 2 and newer.
	 Wine uses this to enable Windows apps to hook the Win32 API
	 functions provided by Wine.

	 What that means is that we've already set up the frame pointer.  */

      if (frame_pointer_needed
	  && !(crtl->drap_reg && crtl->stack_realign_needed))
	{
	  rtx push, mov;

	  /* We've decided to use the frame pointer already set up.
	     Describe this to the unwinder by pretending that both
	     push and mov insns happen right here.

	     Putting the unwind info here at the end of the ms_hook
	     is done so that we can make absolutely certain we get
	     the required byte sequence at the start of the function,
	     rather than relying on an assembler that can produce
	     the exact encoding required.

	     However it does mean (in the unpatched case) that we have
	     a 1 insn window where the asynchronous unwind info is
	     incorrect.  However, if we placed the unwind info at
	     its correct location we would have incorrect unwind info
	     in the patched case.  Which is probably all moot since
	     I don't expect Wine generates dwarf2 unwind info for the
	     system libraries that use this feature.  */

	  insn = emit_insn (gen_blockage ());

	  push = gen_push (hard_frame_pointer_rtx);
	  mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
			     stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (push) = 1;
	  RTX_FRAME_RELATED_P (mov) = 1;

	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));

	  /* Note that gen_push incremented m->fs.cfa_offset, even
	     though we didn't emit the push insn here.  */
	  m->fs.cfa_reg = hard_frame_pointer_rtx;
	  m->fs.fp_offset = m->fs.cfa_offset;
	  m->fs.fp_valid = true;
	}
      else
	{
	  /* The frame pointer is not needed so pop %ebp again.
	     This leaves us with a pristine state.  */
	  emit_insn (gen_pop (hard_frame_pointer_rtx));
	}
    }
  /* The first insn of a function that accepts its static chain on the
     stack is to push the register that would be filled in by a direct
     call.  This insn will be skipped by the trampoline.  */
  else if (ix86_static_chain_on_stack)
    {
      insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
      emit_insn (gen_blockage ());

      /* We don't want to interpret this push insn as a register save,
	 only as a stack adjustment.  The real copy of the register as
	 a save will be done later, if needed.  */
      t = plus_constant (Pmode, stack_pointer_rtx, -UNITS_PER_WORD);
      t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Emit prologue code to adjust stack alignment and setup DRAP, in case
     DRAP is needed and stack realignment is really needed after reload.  */
  if (stack_realign_drap)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;

      /* Only need to push parameter pointer reg if it is caller saved.  */
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	{
	  /* Push arg pointer reg.  */
	  insn = emit_insn (gen_push (crtl->drap_reg));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Grab the argument pointer.  */
      t = plus_constant (Pmode, stack_pointer_rtx, m->fs.sp_offset);
      insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
      RTX_FRAME_RELATED_P (insn) = 1;
      m->fs.cfa_reg = crtl->drap_reg;
      m->fs.cfa_offset = 0;

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (-align_bytes)));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Replicate the return address on the stack so that return
	 address can be reached via (argp - 1) slot.  This is needed
	 to implement macro RETURN_ADDR_RTX and intrinsic function
	 expand_builtin_return_addr etc.  */
      t = plus_constant (Pmode, crtl->drap_reg, -UNITS_PER_WORD);
      t = gen_frame_mem (word_mode, t);
      insn = emit_insn (gen_push (t));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* For the purposes of frame and register save area addressing,
	 we've started over with a new frame.  */
      m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
      m->fs.realigned = true;
    }
  int_registers_saved = (frame.nregs == 0);
  sse_registers_saved = (frame.nsseregs == 0);

  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      /* Note: AT&T enter does NOT have reversed args.  Enter is probably
	 slower on all targets.  Also sdb doesn't like it.  */
      insn = emit_insn (gen_push (hard_frame_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Push registers now, before setting the frame pointer
	 on SEH target.  */
      if (!int_registers_saved
	  && TARGET_SEH
	  && !frame.save_regs_using_mov)
	{
	  ix86_emit_save_regs ();
	  int_registers_saved = true;
	  gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
	}

      if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
	{
	  insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  if (m->fs.cfa_reg == stack_pointer_rtx)
	    m->fs.cfa_reg = hard_frame_pointer_rtx;
	  m->fs.fp_offset = m->fs.sp_offset;
	  m->fs.fp_valid = true;
	}
    }

  if (!int_registers_saved)
    {
      /* If saving registers via PUSH, do so now.  */
      if (!frame.save_regs_using_mov)
	{
	  ix86_emit_save_regs ();
	  int_registers_saved = true;
	  gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
	}

      /* When using red zone we may start register saving before allocating
	 the stack frame saving one cycle of the prologue.  However, avoid
	 doing this if we have to probe the stack; at least on x86_64 the
	 stack probe can turn into a call that clobbers a red zone location.  */
      else if (ix86_using_red_zone ()
	       && (! TARGET_STACK_PROBE
		   || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
	{
	  ix86_emit_save_regs_using_mov (frame.reg_save_offset);
	  int_registers_saved = true;
	}
    }

  if (stack_realign_fp)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
      gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);

      /* The computation of the size of the re-aligned stack frame means
	 that we must allocate the size of the register save area before
	 performing the actual alignment.  Otherwise we cannot guarantee
	 that there's enough storage above the realignment point.  */
      if (m->fs.sp_offset != frame.sse_reg_save_offset)
	pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				   GEN_INT (m->fs.sp_offset
					    - frame.sse_reg_save_offset),
				   -1, false);

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (-align_bytes)));

      /* For the purposes of register save area addressing, the stack
	 pointer is no longer valid.  As for the value of sp_offset,
	 see ix86_compute_frame_layout, which we need to match in order
	 to pass verification of stack_pointer_offset at the end.  */
      m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
      m->fs.sp_valid = false;
    }
  allocate = frame.stack_pointer_offset - m->fs.sp_offset;

  if (flag_stack_usage_info)
    {
      /* We start to count from ARG_POINTER.  */
      HOST_WIDE_INT stack_size = frame.stack_pointer_offset;

      /* If it was realigned, take into account the fake frame.  */
      if (stack_realign_drap)
	{
	  if (ix86_static_chain_on_stack)
	    stack_size += UNITS_PER_WORD;

	  if (!call_used_regs[REGNO (crtl->drap_reg)])
	    stack_size += UNITS_PER_WORD;

	  /* This over-estimates by 1 minimal-stack-alignment-unit but
	     mitigates that by counting in the new return address slot.  */
	  current_function_dynamic_stack_size
	    += crtl->stack_alignment_needed / BITS_PER_UNIT;
	}

      current_function_static_stack_size = stack_size;
    }

  /* On SEH target with very large frame size, allocate an area to save
     SSE registers (as the very large allocation won't be described).  */
  if (TARGET_SEH
      && frame.stack_pointer_offset > SEH_MAX_FRAME_SIZE
      && !sse_registers_saved)
    {
      HOST_WIDE_INT sse_size =
	frame.sse_reg_save_offset - frame.reg_save_offset;

      gcc_assert (int_registers_saved);

      /* No need to do stack checking as the area will be immediately
	 written.  */
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				 GEN_INT (-sse_size), -1,
				 m->fs.cfa_reg == stack_pointer_rtx);
      allocate -= sse_size;
      ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
      sse_registers_saved = true;
    }

  /* The stack has already been decremented by the instruction calling us
     so probe if the size is non-negative to preserve the protection area.  */
  if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    {
      /* We expect the registers to be saved when probes are used.  */
      gcc_assert (int_registers_saved);

      if (STACK_CHECK_MOVING_SP)
	{
	  ix86_adjust_stack_and_probe (allocate);
	  allocate = 0;
	}
      else
	{
	  HOST_WIDE_INT size = allocate;

	  if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
	    size = 0x80000000 - STACK_CHECK_PROTECT - 1;

	  if (TARGET_STACK_PROBE)
	    ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
	  else
	    ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
	}
    }
  if (allocate == 0)
    ;
  else if (!ix86_target_stack_probe ()
	   || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				 GEN_INT (-allocate), -1,
				 m->fs.cfa_reg == stack_pointer_rtx);
    }
  else
    {
      rtx eax = gen_rtx_REG (Pmode, AX_REG);
      rtx r10 = NULL;
      rtx (*adjust_stack_insn) (rtx, rtx, rtx);
      const bool sp_is_cfa_reg = (m->fs.cfa_reg == stack_pointer_rtx);
      bool eax_live = false;
      bool r10_live = false;

      if (TARGET_64BIT)
	r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
      if (!TARGET_64BIT_MS_ABI)
	eax_live = ix86_eax_live_at_start_p ();

      /* Note that SEH directives need to continue tracking the stack
	 pointer even after the frame pointer has been set up.  */
      if (eax_live)
	{
	  insn = emit_insn (gen_push (eax));
	  allocate -= UNITS_PER_WORD;
	  if (sp_is_cfa_reg || TARGET_SEH)
	    {
	      if (sp_is_cfa_reg)
		m->fs.cfa_offset += UNITS_PER_WORD;
	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}

      if (r10_live)
	{
	  r10 = gen_rtx_REG (Pmode, R10_REG);
	  insn = emit_insn (gen_push (r10));
	  allocate -= UNITS_PER_WORD;
	  if (sp_is_cfa_reg || TARGET_SEH)
	    {
	      if (sp_is_cfa_reg)
		m->fs.cfa_offset += UNITS_PER_WORD;
	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}

      emit_move_insn (eax, GEN_INT (allocate));
      emit_insn (ix86_gen_allocate_stack_worker (eax, eax));

      /* Use the fact that AX still contains ALLOCATE.  */
      adjust_stack_insn = (Pmode == DImode
			   ? gen_pro_epilogue_adjust_stack_di_sub
			   : gen_pro_epilogue_adjust_stack_si_sub);
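      /* (Added note: at this point EAX holds ALLOCATE and the
	 stack-allocation worker emitted above, a chkstk-style helper on
	 Windows targets, has already touched the pages, so the pattern
	 emitted below only has to perform the SP adjustment itself; the
	 DImode/SImode choice merely matches the pattern to the pointer
	 width.)  */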
      insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
					   stack_pointer_rtx, eax));

      if (sp_is_cfa_reg || TARGET_SEH)
	{
	  if (sp_is_cfa_reg)
	    m->fs.cfa_offset += allocate;
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				     plus_constant (Pmode, stack_pointer_rtx,
						    -allocate)));
	}
      m->fs.sp_offset += allocate;

      if (r10_live && eax_live)
	{
	  t = choose_baseaddr (m->fs.sp_offset - allocate);
	  emit_move_insn (gen_rtx_REG (word_mode, R10_REG),
			  gen_frame_mem (word_mode, t));
	  t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
	  emit_move_insn (gen_rtx_REG (word_mode, AX_REG),
			  gen_frame_mem (word_mode, t));
	}
      else if (eax_live || r10_live)
	{
	  t = choose_baseaddr (m->fs.sp_offset - allocate);
	  emit_move_insn (gen_rtx_REG (word_mode,
				       (eax_live ? AX_REG : R10_REG)),
			  gen_frame_mem (word_mode, t));
	}
    }
  gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);

  /* If we haven't already set up the frame pointer, do so now.  */
  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
			    GEN_INT (frame.stack_pointer_offset
				     - frame.hard_frame_pointer_offset));
      insn = emit_insn (insn);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);

      if (m->fs.cfa_reg == stack_pointer_rtx)
	m->fs.cfa_reg = hard_frame_pointer_rtx;
      m->fs.fp_offset = frame.hard_frame_pointer_offset;
      m->fs.fp_valid = true;
    }

  if (!int_registers_saved)
    ix86_emit_save_regs_using_mov (frame.reg_save_offset);
  if (!sse_registers_saved)
    ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);

  pic_reg_used = false;
  if (pic_offset_table_rtx
      && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
	  || crtl->profile))
    {
      unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();

      if (alt_pic_reg_used != INVALID_REGNUM)
	SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);

      pic_reg_used = true;
    }

  if (pic_reg_used)
    {
      if (TARGET_64BIT)
	{
	  if (ix86_cmodel == CM_LARGE_PIC)
	    {
	      rtx label, tmp_reg;

	      gcc_assert (Pmode == DImode);
	      label = gen_label_rtx ();
	      emit_label (label);
	      LABEL_PRESERVE_P (label) = 1;
	      tmp_reg = gen_rtx_REG (Pmode, R11_REG);
	      gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
	      insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx,
						   label));
	      insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
	      insn = emit_insn (ix86_gen_add3 (pic_offset_table_rtx,
					       pic_offset_table_rtx, tmp_reg));
	    }
	  else
	    insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
	}
      else
	{
	  insn = emit_insn (gen_set_got (pic_offset_table_rtx));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL_RTX);
	}
    }

  /* In the pic_reg_used case, make sure that the got load isn't deleted
     when mcount needs it.  Blockage to avoid call movement across mcount
     call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
     note.  */
  if (crtl->profile && !flag_fentry && pic_reg_used)
    emit_insn (gen_prologue_use (pic_offset_table_rtx));

  if (crtl->drap_reg && !crtl->stack_realign_needed)
    {
      /* vDRAP is setup but after reload it turns out stack realign
	 isn't necessary, here we will emit prologue to setup DRAP
	 without stack realign adjustment.  */
      t = choose_baseaddr (0);
      emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
    }

  /* Prevent instructions from being scheduled into register save push
     sequence when access to the redzone area is done through frame pointer.
     The offset between the frame pointer and the stack pointer is calculated
     relative to the value of the stack pointer at the end of the function
     prologue, and moving instructions that access redzone area via frame
     pointer inside push sequence violates this assumption.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  /* Emit cld instruction if stringops are used in the function.  */
  if (TARGET_CLD && ix86_current_function_needs_cld)
    emit_insn (gen_cld ());

  /* SEH requires that the prologue end within 256 bytes of the start of
     the function.  Prevent instruction schedules that would extend that.
     Further, prevent alloca modifications to the stack pointer from being
     combined with prologue modifications.  */
  if (TARGET_SEH)
    emit_insn (gen_prologue_use (stack_pointer_rtx));
}
/* Emit code to restore REG using a POP insn.  */

static void
ix86_emit_restore_reg_using_pop (rtx reg)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (gen_pop (reg));

  ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
  m->fs.sp_offset -= UNITS_PER_WORD;

  if (m->fs.cfa_reg == crtl->drap_reg
      && REGNO (reg) == REGNO (crtl->drap_reg))
    {
      /* Previously we'd represented the CFA as an expression
	 like *(%ebp - 8).  We've just popped that value from
	 the stack, which means we need to reset the CFA to
	 the drap register.  This will remain until we restore
	 the stack pointer.  */
      add_reg_note (insn, REG_CFA_DEF_CFA, reg);
      RTX_FRAME_RELATED_P (insn) = 1;

      /* This means that the DRAP register is valid for addressing too.  */
      m->fs.drap_valid = true;
      return;
    }

  if (m->fs.cfa_reg == stack_pointer_rtx)
    {
      rtx x = plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD);
      x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
      RTX_FRAME_RELATED_P (insn) = 1;

      m->fs.cfa_offset -= UNITS_PER_WORD;
    }

  /* When the frame pointer is the CFA, and we pop it, we are
     swapping back to the stack pointer as the CFA.  This happens
     for stack frames that don't allocate other data, so we assume
     the stack pointer is now pointing at the return address, i.e.
     the function entry state, which makes the offset be 1 word.  */
  if (reg == hard_frame_pointer_rtx)
    {
      m->fs.fp_valid = false;
      if (m->fs.cfa_reg == hard_frame_pointer_rtx)
	{
	  m->fs.cfa_reg = stack_pointer_rtx;
	  m->fs.cfa_offset -= UNITS_PER_WORD;

	  add_reg_note (insn, REG_CFA_DEF_CFA,
			gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				      GEN_INT (m->fs.cfa_offset)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}
/* Emit code to restore saved registers using POP insns.  */

static void
ix86_emit_restore_regs_using_pop (void)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
      ix86_emit_restore_reg_using_pop (gen_rtx_REG (word_mode, regno));
}
/* Emit code and notes for the LEAVE instruction.  */

static void
ix86_emit_leave (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (ix86_gen_leave ());

  ix86_add_queued_cfa_restore_notes (insn);

  gcc_assert (m->fs.fp_valid);
  m->fs.sp_valid = true;
  m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
  m->fs.fp_valid = false;

  if (m->fs.cfa_reg == hard_frame_pointer_rtx)
    {
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = m->fs.sp_offset;

      add_reg_note (insn, REG_CFA_DEF_CFA,
		    plus_constant (Pmode, stack_pointer_rtx,
				   m->fs.sp_offset));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
			     m->fs.fp_offset);
}
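/* (For reference: LEAVE is architecturally equivalent to
   "mov %ebp, %esp; pop %ebp", which is why SP becomes valid again at
   fp_offset - UNITS_PER_WORD in the bookkeeping above.)  */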
/* Emit code to restore saved registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
				  bool maybe_eh_return)
{
  struct machine_function *m = cfun->machine;
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
	rtx reg = gen_rtx_REG (word_mode, regno);
	rtx insn, mem;

	mem = choose_baseaddr (cfa_offset);
	mem = gen_frame_mem (word_mode, mem);
	insn = emit_move_insn (reg, mem);

	if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
	  {
	    /* Previously we'd represented the CFA as an expression
	       like *(%ebp - 8).  We've just popped that value from
	       the stack, which means we need to reset the CFA to
	       the drap register.  This will remain until we restore
	       the stack pointer.  */
	    add_reg_note (insn, REG_CFA_DEF_CFA, reg);
	    RTX_FRAME_RELATED_P (insn) = 1;

	    /* This means that the DRAP register is valid for addressing.  */
	    m->fs.drap_valid = true;
	  }
	else
	  ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

	cfa_offset -= UNITS_PER_WORD;
      }
}
/* Emit code to restore saved SSE registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
				      bool maybe_eh_return)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
	rtx reg = gen_rtx_REG (V4SFmode, regno);
	rtx mem;

	mem = choose_baseaddr (cfa_offset);
	mem = gen_rtx_MEM (V4SFmode, mem);
	set_mem_align (mem, 128);
	emit_move_insn (reg, mem);

	ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

	cfa_offset -= 16;
      }
}
/* Restore function stack, frame, and registers.  */

void
ix86_expand_epilogue (int style)
{
  struct machine_function *m = cfun->machine;
  struct machine_frame_state frame_state_save = m->fs;
  struct ix86_frame frame;
  bool restore_regs_via_mov;
  bool using_drap;

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);

  m->fs.sp_valid = (!frame_pointer_needed
		    || (crtl->sp_is_unchanging
			&& !stack_realign_fp));
  gcc_assert (!m->fs.sp_valid
	      || m->fs.sp_offset == frame.stack_pointer_offset);

  /* The FP must be valid if the frame pointer is present.  */
  gcc_assert (frame_pointer_needed == m->fs.fp_valid);
  gcc_assert (!m->fs.fp_valid
	      || m->fs.fp_offset == frame.hard_frame_pointer_offset);

  /* We must have *some* valid pointer to the stack frame.  */
  gcc_assert (m->fs.sp_valid || m->fs.fp_valid);

  /* The DRAP is never valid at this point.  */
  gcc_assert (!m->fs.drap_valid);

  /* See the comment about red zone and frame
     pointer usage in ix86_expand_prologue.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  using_drap = crtl->drap_reg && crtl->stack_realign_needed;
  gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);

  /* Determine the CFA offset of the end of the red-zone.  */
  m->fs.red_zone_offset = 0;
  if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
    {
      /* The red-zone begins below the return address.  */
      m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;

      /* When the register save area is in the aligned portion of
	 the stack, determine the maximum runtime displacement that
	 matches up with the aligned frame.  */
      if (stack_realign_drap)
	m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
				  + UNITS_PER_WORD);
    }

  /* Special care must be taken for the normal return case of a function
     using eh_return: the eax and edx registers are marked as saved, but
     not restored along this path.  Adjust the save location to match.  */
  if (crtl->calls_eh_return && style != 2)
    frame.reg_save_offset -= 2 * UNITS_PER_WORD;

  /* EH_RETURN requires the use of moves to function properly.  */
  if (crtl->calls_eh_return)
    restore_regs_via_mov = true;
  /* SEH requires the use of pops to identify the epilogue.  */
  else if (TARGET_SEH)
    restore_regs_via_mov = false;
  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since it's
     less work than reloading sp and popping the register.  */
  else if (!m->fs.sp_valid && frame.nregs <= 1)
    restore_regs_via_mov = true;
  else if (TARGET_EPILOGUE_USING_MOVE
	   && cfun->machine->use_fast_prologue_epilogue
	   && (frame.nregs > 1
	       || m->fs.sp_offset != frame.reg_save_offset))
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
	   && !frame.nregs
	   && m->fs.sp_offset != frame.reg_save_offset)
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
	   && TARGET_USE_LEAVE
	   && cfun->machine->use_fast_prologue_epilogue
	   && frame.nregs == 1)
    restore_regs_via_mov = true;
  else
    restore_regs_via_mov = false;
  if (restore_regs_via_mov || frame.nsseregs)
    {
      /* Ensure that the entire register save area is addressable via
	 the stack pointer, if we will restore via sp.  */
      if (TARGET_64BIT
	  && m->fs.sp_offset > 0x7fffffff
	  && !(m->fs.fp_valid || m->fs.drap_valid)
	  && (frame.nsseregs + frame.nregs) != 0)
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     GEN_INT (m->fs.sp_offset
					      - frame.sse_reg_save_offset),
				     style,
				     m->fs.cfa_reg == stack_pointer_rtx);
	}
    }

  /* If there are any SSE registers to restore, then we have to do it
     via moves, since there's obviously no pop for SSE regs.  */
  if (frame.nsseregs)
    ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
					  style == 2);

  if (restore_regs_via_mov)
    {
      rtx t;

      if (frame.nregs)
	ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);

      /* eh_return epilogues need %ecx added to the stack pointer.  */
      if (style == 2)
	{
	  rtx insn, sa = EH_RETURN_STACKADJ_RTX;

	  /* Stack align doesn't work with eh_return.  */
	  gcc_assert (!stack_realign_drap);
	  /* Neither do regparm nested functions.  */
	  gcc_assert (!ix86_static_chain_on_stack);

	  if (frame_pointer_needed)
	    {
	      t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
	      t = plus_constant (Pmode, t, m->fs.fp_offset - UNITS_PER_WORD);
	      emit_insn (gen_rtx_SET (VOIDmode, sa, t));

	      t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
	      insn = emit_move_insn (hard_frame_pointer_rtx, t);

	      /* Note that we use SA as a temporary CFA, as the return
		 address is at the proper place relative to it.  We
		 pretend this happens at the FP restore insn because
		 prior to this insn the FP would be stored at the wrong
		 offset relative to SA, and after this insn we have no
		 other reasonable register to use for the CFA.  We don't
		 bother resetting the CFA to the SP for the duration of
		 the return insn.  */
	      add_reg_note (insn, REG_CFA_DEF_CFA,
			    plus_constant (Pmode, sa, UNITS_PER_WORD));
	      ix86_add_queued_cfa_restore_notes (insn);
	      add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
	      RTX_FRAME_RELATED_P (insn) = 1;

	      m->fs.cfa_reg = sa;
	      m->fs.cfa_offset = UNITS_PER_WORD;
	      m->fs.fp_valid = false;

	      pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
					 const0_rtx, style, false);
	    }
	  else
	    {
	      t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
	      t = plus_constant (Pmode, t, m->fs.sp_offset - UNITS_PER_WORD);
	      insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
	      ix86_add_queued_cfa_restore_notes (insn);

	      gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
	      if (m->fs.cfa_offset != UNITS_PER_WORD)
		{
		  m->fs.cfa_offset = UNITS_PER_WORD;
		  add_reg_note (insn, REG_CFA_DEF_CFA,
				plus_constant (Pmode, stack_pointer_rtx,
					       UNITS_PER_WORD));
		  RTX_FRAME_RELATED_P (insn) = 1;
		}
	    }
	  m->fs.sp_offset = UNITS_PER_WORD;
	  m->fs.sp_valid = true;
	}
    }
  else
    {
      /* SEH requires that the function end with (1) a stack adjustment
	 if necessary, (2) a sequence of pops, and (3) a return or
	 jump instruction.  Prevent insns from the function body from
	 being scheduled into this sequence.  */
      if (TARGET_SEH)
	{
	  /* Prevent a catch region from being adjacent to the standard
	     epilogue sequence.  Unfortunately, neither crtl->uses_eh_lsda
	     nor several other flags that would be interesting to test are
	     set up yet.  */
	  if (flag_non_call_exceptions)
	    emit_insn (gen_nops (const1_rtx));
	  else
	    emit_insn (gen_blockage ());
	}

      /* First step is to deallocate the stack frame so that we can
	 pop the registers.  Also do it on SEH target for very large
	 frame as the emitted instructions aren't allowed by the ABI
	 in epilogues.  */
      if (!m->fs.sp_valid
	  || (TARGET_SEH
	      && (m->fs.sp_offset - frame.reg_save_offset
		  >= SEH_MAX_FRAME_SIZE)))
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
				     GEN_INT (m->fs.fp_offset
					      - frame.reg_save_offset),
				     style, false);
	}
      else if (m->fs.sp_offset != frame.reg_save_offset)
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     GEN_INT (m->fs.sp_offset
					      - frame.reg_save_offset),
				     style,
				     m->fs.cfa_reg == stack_pointer_rtx);
	}

      ix86_emit_restore_regs_using_pop ();
    }
  /* If we used a stack pointer and haven't already got rid of it,
     then do so now.  */
  if (m->fs.fp_valid)
    {
      /* If the stack pointer is valid and pointing at the frame
	 pointer store address, then we only need a pop.  */
      if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
	ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
      /* Leave results in shorter dependency chains on CPUs that are
	 able to grok it fast.  */
      else if (TARGET_USE_LEAVE
	       || optimize_function_for_size_p (cfun)
	       || !cfun->machine->use_fast_prologue_epilogue)
	ix86_emit_leave ();
      else
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx,
				     hard_frame_pointer_rtx,
				     const0_rtx, style, !using_drap);
	  ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
	}
    }

  if (using_drap)
    {
      int param_ptr_offset = UNITS_PER_WORD;
      rtx insn;

      gcc_assert (stack_realign_drap);

      if (ix86_static_chain_on_stack)
	param_ptr_offset += UNITS_PER_WORD;
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	param_ptr_offset += UNITS_PER_WORD;

      insn = emit_insn (gen_rtx_SET
			(VOIDmode, stack_pointer_rtx,
			 gen_rtx_PLUS (Pmode,
				       crtl->drap_reg,
				       GEN_INT (-param_ptr_offset))));
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = param_ptr_offset;
      m->fs.sp_offset = param_ptr_offset;
      m->fs.realigned = false;

      add_reg_note (insn, REG_CFA_DEF_CFA,
		    gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				  GEN_INT (param_ptr_offset)));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!call_used_regs[REGNO (crtl->drap_reg)])
	ix86_emit_restore_reg_using_pop (crtl->drap_reg);
    }
  /* At this point the stack pointer must be valid, and we must have
     restored all of the registers.  We may not have deallocated the
     entire stack frame.  We've delayed this until now because it may
     be possible to merge the local stack deallocation with the
     deallocation forced by ix86_static_chain_on_stack.  */
  gcc_assert (m->fs.sp_valid);
  gcc_assert (!m->fs.fp_valid);
  gcc_assert (!m->fs.realigned);
  if (m->fs.sp_offset != UNITS_PER_WORD)
    pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
			       GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
			       style, true);
  else
    ix86_add_queued_cfa_restore_notes (get_last_insn ());

  /* Sibcall epilogues don't want a return instruction.  */
  if (style == 0)
    {
      m->fs = frame_state_save;
      return;
    }

  if (crtl->args.pops_args && crtl->args.size)
    {
      rtx popc = GEN_INT (crtl->args.pops_args);

      /* i386 can only pop 64K bytes.  If asked to pop more, pop return
	 address, do explicit add, and jump indirectly to the caller.  */
      if (crtl->args.pops_args >= 65536)
	{
	  rtx ecx = gen_rtx_REG (SImode, CX_REG);
	  rtx insn;

	  /* There is no "pascal" calling convention in any 64bit ABI.  */
	  gcc_assert (!TARGET_64BIT);

	  insn = emit_insn (gen_pop (ecx));
	  m->fs.cfa_offset -= UNITS_PER_WORD;
	  m->fs.sp_offset -= UNITS_PER_WORD;

	  add_reg_note (insn, REG_CFA_ADJUST_CFA,
			copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
	  add_reg_note (insn, REG_CFA_REGISTER,
			gen_rtx_SET (VOIDmode, ecx, pc_rtx));
	  RTX_FRAME_RELATED_P (insn) = 1;

	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     popc, -1, true);
	  emit_jump_insn (gen_simple_return_indirect_internal (ecx));
	}
      else
	emit_jump_insn (gen_simple_return_pop_internal (popc));
    }
  else
    emit_jump_insn (gen_simple_return_internal ());

  /* Restore the state back to the state from the prologue,
     so that it's correct for the next epilogue.  */
  m->fs = frame_state_save;
}
/* Reset from the function's potential modifications.  */

static void
ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  if (pic_offset_table_rtx)
    SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
#if TARGET_MACHO
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    rtx deleted_debug_label = NULL_RTX;
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only; instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
}
/* Return a scratch register to use in the split stack prologue.  The
   split stack prologue is used for -fsplit-stack.  It is the first
   instructions in the function, even before the regular prologue.
   The scratch register can be any caller-saved register which is not
   used for parameters or for the static chain.  */

static unsigned int
split_stack_prologue_scratch_regno (void)
{
  if (TARGET_64BIT)
    return R11_REG;
  else
    {
      bool is_fastcall, is_thiscall;
      int regparm;

      is_fastcall = (lookup_attribute ("fastcall",
				       TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
		     != NULL);
      is_thiscall = (lookup_attribute ("thiscall",
				       TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
		     != NULL);
      regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);

      if (is_fastcall)
	{
	  if (DECL_STATIC_CHAIN (cfun->decl))
	    {
	      sorry ("-fsplit-stack does not support fastcall with "
		     "nested function");
	      return INVALID_REGNUM;
	    }
	  return AX_REG;
	}
      else if (is_thiscall)
	{
	  if (!DECL_STATIC_CHAIN (cfun->decl))
	    return DX_REG;
	  return AX_REG;
	}
      else if (regparm < 3)
	{
	  if (!DECL_STATIC_CHAIN (cfun->decl))
	    return CX_REG;
	  else
	    {
	      if (regparm >= 2)
		{
		  sorry ("-fsplit-stack does not support 2 register "
			 "parameters for a nested function");
		  return INVALID_REGNUM;
		}
	      return DX_REG;
	    }
	}
      else
	{
	  /* FIXME: We could make this work by pushing a register
	     around the addition and comparison.  */
	  sorry ("-fsplit-stack does not support 3 register parameters");
	  return INVALID_REGNUM;
	}
    }
}
/* A SYMBOL_REF for the function which allocates new stack space for
   -fsplit-stack.  */

static GTY(()) rtx split_stack_fn;

/* A SYMBOL_REF for the more stack function when using the large
   model.  */

static GTY(()) rtx split_stack_fn_large;
/* Handle -fsplit-stack.  These are the first instructions in the
   function, even before the regular prologue.  */

void
ix86_expand_split_stack_prologue (void)
{
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  unsigned HOST_WIDE_INT args_size;
  rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
  rtx scratch_reg = NULL_RTX;
  rtx varargs_label = NULL_RTX;
  rtx fn;

  gcc_assert (flag_split_stack && reload_completed);

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);
  allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;

  /* This is the label we will branch to if we have enough stack
     space.  We expect the basic block reordering pass to reverse this
     branch if optimizing, so that we branch in the unlikely case.  */
  label = gen_label_rtx ();

  /* We need to compare the stack pointer minus the frame size with
     the stack boundary in the TCB.  The stack boundary always gives
     us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
     can compare directly.  Otherwise we need to do an addition.  */
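  /* (Added note: SPLIT_STACK_AVAILABLE is the per-thread slack the
     allocation routines guarantee below the recorded stack boundary;
     it is defined earlier in this file as a small constant, 256 bytes,
     so most small frames take the direct-compare path.)  */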
  limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			  UNSPEC_STACK_CHECK);
  limit = gen_rtx_CONST (Pmode, limit);
  limit = gen_rtx_MEM (Pmode, limit);
  if (allocate < SPLIT_STACK_AVAILABLE)
    current = stack_pointer_rtx;
  else
    {
      unsigned int scratch_regno;
      rtx offset;

      /* We need a scratch register to hold the stack pointer minus
	 the required frame size.  Since this is the very start of the
	 function, the scratch register can be any caller-saved
	 register which is not used for parameters.  */
      offset = GEN_INT (- allocate);
      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno == INVALID_REGNUM)
	return;
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
	{
	  /* We don't use ix86_gen_add3 in this case because it will
	     want to split to lea, but when not optimizing the insn
	     will not be split after this point.  */
	  emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
				  gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						offset)));
	}
      else
	{
	  emit_move_insn (scratch_reg, offset);
	  emit_insn (ix86_gen_add3 (scratch_reg, scratch_reg,
				    stack_pointer_rtx));
	}
      current = scratch_reg;
    }

  ix86_expand_branch (GEU, current, limit, label);
  jump_insn = get_last_insn ();
  JUMP_LABEL (jump_insn) = label;

  /* Mark the jump as very likely to be taken.  */
  add_reg_note (jump_insn, REG_BR_PROB,
		GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));

  if (split_stack_fn == NULL_RTX)
    split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
  fn = split_stack_fn;

  /* Get more stack space.  We pass in the desired stack space and the
     size of the arguments to copy to the new stack.  In 32-bit mode
     we push the parameters; __morestack will return on a new stack
     anyhow.  In 64-bit mode we pass the parameters in r10 and
     r11.  */
  allocate_rtx = GEN_INT (allocate);
  args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
  call_fusage = NULL_RTX;
  if (TARGET_64BIT)
    {
      rtx reg10, reg11;

      reg10 = gen_rtx_REG (Pmode, R10_REG);
      reg11 = gen_rtx_REG (Pmode, R11_REG);

      /* If this function uses a static chain, it will be in %r10.
	 Preserve it across the call to __morestack.  */
      if (DECL_STATIC_CHAIN (cfun->decl))
	{
	  rtx rax;

	  rax = gen_rtx_REG (word_mode, AX_REG);
	  emit_move_insn (rax, gen_rtx_REG (word_mode, R10_REG));
	  use_reg (&call_fusage, rax);
	}

      if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
	{
	  HOST_WIDE_INT argval;

	  gcc_assert (Pmode == DImode);
	  /* When using the large model we need to load the address
	     into a register, and we've run out of registers.  So we
	     switch to a different calling convention, and we call a
	     different function: __morestack_large.  We pass the
	     argument size in the upper 32 bits of r10 and pass the
	     frame size in the lower 32 bits.  */
	  gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
	  gcc_assert ((args_size & 0xffffffff) == args_size);

	  if (split_stack_fn_large == NULL_RTX)
	    split_stack_fn_large =
	      gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");

	  if (ix86_cmodel == CM_LARGE_PIC)
	    {
	      rtx label, x;

	      label = gen_label_rtx ();
	      emit_label (label);
	      LABEL_PRESERVE_P (label) = 1;
	      emit_insn (gen_set_rip_rex64 (reg10, label));
	      emit_insn (gen_set_got_offset_rex64 (reg11, label));
	      emit_insn (ix86_gen_add3 (reg10, reg10, reg11));
	      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
				  UNSPEC_GOT);
	      x = gen_rtx_CONST (Pmode, x);
	      emit_move_insn (reg11, x);
	      x = gen_rtx_PLUS (Pmode, reg10, reg11);
	      x = gen_const_mem (Pmode, x);
	      emit_move_insn (reg11, x);
	    }
	  else
	    emit_move_insn (reg11, split_stack_fn_large);

	  fn = reg11;

	  argval = ((args_size << 16) << 16) + allocate;
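	  /* (Worked example, added for illustration: with args_size == 16
	     and allocate == 4096 the packed value is 0x0000001000001000,
	     i.e. the argument size in the upper 32 bits and the frame
	     size in the lower 32 bits, matching the comment above.)  */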
	  emit_move_insn (reg10, GEN_INT (argval));
	}
      else
	{
	  emit_move_insn (reg10, allocate_rtx);
	  emit_move_insn (reg11, GEN_INT (args_size));
	  use_reg (&call_fusage, reg11);
	}

      use_reg (&call_fusage, reg10);
    }
  else
    {
      emit_insn (gen_push (GEN_INT (args_size)));
      emit_insn (gen_push (allocate_rtx));
    }
  call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
				GEN_INT (UNITS_PER_WORD), constm1_rtx,
				NULL_RTX, false);
  add_function_usage_to (call_insn, call_fusage);

  /* In order to make call/return prediction work right, we now need
     to execute a return instruction.  See
     libgcc/config/i386/morestack.S for the details on how this works.

     For flow purposes gcc must not see this as a return
     instruction--we need control flow to continue at the subsequent
     label.  Therefore, we use an unspec.  */
  gcc_assert (crtl->args.pops_args < 65536);
  emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));

  /* If we are in 64-bit mode and this function uses a static chain,
     we saved %r10 in %rax before calling __morestack.  */
  if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
    emit_move_insn (gen_rtx_REG (word_mode, R10_REG),
		    gen_rtx_REG (word_mode, AX_REG));

  /* If this function calls va_start, we need to store a pointer to
     the arguments on the old stack, because they may not have been
     all copied to the new stack.  At this point the old stack can be
     found at the frame pointer value used by __morestack, because
     __morestack has set that up before calling back to us.  Here we
     store that pointer in a scratch register, and in
     ix86_expand_prologue we store the scratch register in a stack
     slot.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      unsigned int scratch_regno;
      rtx frame_reg;
      int words;

      scratch_regno = split_stack_prologue_scratch_regno ();
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      frame_reg = gen_rtx_REG (Pmode, BP_REG);

      /* 64-bit:
	 fp -> old fp value
	       return address within this function
	       return address of caller of this function
	       stack arguments
	 So we add three words to get to the stack arguments.

	 32-bit:
	 fp -> old fp value
	       return address within this function
	       first argument to __morestack
	       second argument to __morestack
	       return address of caller of this function
	       stack arguments
	 So we add five words to get to the stack arguments.  */

      words = TARGET_64BIT ? 3 : 5;
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
			      gen_rtx_PLUS (Pmode, frame_reg,
					    GEN_INT (words * UNITS_PER_WORD))));

      varargs_label = gen_label_rtx ();
      emit_jump_insn (gen_jump (varargs_label));
      JUMP_LABEL (get_last_insn ()) = varargs_label;
    }

  emit_label (label);
  LABEL_NUSES (label) = 1;

  /* If this function calls va_start, we now have to set the scratch
     register for the case where we do not call __morestack.  In this
     case we need to set it based on the stack pointer.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					    GEN_INT (UNITS_PER_WORD))));

      emit_label (varargs_label);
      LABEL_NUSES (varargs_label) = 1;
    }
}
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a scratch register.  */

static void
ix86_live_on_entry (bitmap regs)
{
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      gcc_assert (flag_split_stack);
      bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
    }
}
/* Determine if OP is a suitable SUBREG RTX for an address.  */

static bool
ix86_address_subreg_operand (rtx op)
{
  enum machine_mode mode;

  if (!REG_P (op))
    return false;

  mode = GET_MODE (op);

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  /* Don't allow SUBREGs that span more than a word.  It can lead to spill
     failures when the register is one word out of a two word structure.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return false;

  /* Allow only SUBREGs of non-eliminable hard registers.  */
  return register_no_elim_operand (op, mode);
}
/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return 0 if the structure of the address is
   grossly off.  Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing length of lea instruction.  */

static int
ix86_decompose_address (rtx addr, struct ix86_address *out)
{
  rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
  rtx base_reg, index_reg;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;
  rtx tmp;
  int retval = 1;
  enum ix86_address_seg seg = SEG_DEFAULT;

  /* Allow zero-extended SImode addresses,
     they will be emitted with addr32 prefix.  */
  if (TARGET_64BIT && GET_MODE (addr) == DImode)
    {
      if (GET_CODE (addr) == ZERO_EXTEND
	  && GET_MODE (XEXP (addr, 0)) == SImode)
	{
	  addr = XEXP (addr, 0);
	  if (CONST_INT_P (addr))
	    return 0;
	}
      else if (GET_CODE (addr) == AND
	       && const_32bit_mask (XEXP (addr, 1), DImode))
	{
	  addr = simplify_gen_subreg (SImode, XEXP (addr, 0), DImode, 0);
	  if (addr == NULL_RTX)
	    return 0;

	  if (CONST_INT_P (addr))
	    return 0;
	}
    }

  /* Allow SImode subregs of DImode addresses,
     they will be emitted with addr32 prefix.  */
  if (TARGET_64BIT && GET_MODE (addr) == SImode)
    {
      if (GET_CODE (addr) == SUBREG
	  && GET_MODE (SUBREG_REG (addr)) == DImode)
	{
	  addr = SUBREG_REG (addr);
	  if (CONST_INT_P (addr))
	    return 0;
	}
    }

  if (REG_P (addr))
    base = addr;
  else if (GET_CODE (addr) == SUBREG)
    {
      if (ix86_address_subreg_operand (SUBREG_REG (addr)))
	base = addr;
      else
	return 0;
    }
  else if (GET_CODE (addr) == PLUS)
    {
      rtx addends[4], op;
      int n = 0, i;

      op = addr;
      do
	{
	  if (n >= 4)
	    return 0;
	  addends[n++] = XEXP (op, 1);
	  op = XEXP (op, 0);
	}
      while (GET_CODE (op) == PLUS);
      if (n >= 4)
	return 0;
      addends[n] = op;

      for (i = n; i >= 0; --i)
	{
	  op = addends[i];
	  switch (GET_CODE (op))
	    {
	    case MULT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      break;

	    case ASHIFT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      tmp = XEXP (op, 1);
	      if (!CONST_INT_P (tmp))
		return 0;
	      scale = INTVAL (tmp);
	      if ((unsigned HOST_WIDE_INT) scale > 3)
		return 0;
	      scale = 1 << scale;
	      break;

	    case CONST:
	      op = XEXP (op, 0);
	      if (GET_CODE (op) != UNSPEC)
		return 0;
	      /* FALLTHRU */

	    case UNSPEC:
	      if (XINT (op, 1) == UNSPEC_TP
		  && TARGET_TLS_DIRECT_SEG_REFS
		  && seg == SEG_DEFAULT)
		seg = TARGET_64BIT ? SEG_FS : SEG_GS;
	      else
		return 0;
	      break;

	    case SUBREG:
	      if (!ix86_address_subreg_operand (SUBREG_REG (op)))
		return 0;
	      /* FALLTHRU */

	    case REG:
	      if (!base)
		base = op;
	      else if (!index)
		index = op;
	      else
		return 0;
	      break;

	    case CONST_INT:
	    case SYMBOL_REF:
	    case LABEL_REF:
	      if (disp)
		return 0;
	      disp = op;
	      break;

	    default:
	      return 0;
	    }
	}
    }
  else if (GET_CODE (addr) == MULT)
    {
      index = XEXP (addr, 0);		/* index*scale */
      scale_rtx = XEXP (addr, 1);
    }
  else if (GET_CODE (addr) == ASHIFT)
    {
      /* We're called for lea too, which implements ashift on occasion.  */
      index = XEXP (addr, 0);
      tmp = XEXP (addr, 1);
      if (!CONST_INT_P (tmp))
	return 0;
      scale = INTVAL (tmp);
      if ((unsigned HOST_WIDE_INT) scale > 3)
	return 0;
      scale = 1 << scale;
11702 else if (CONST_INT_P (addr
))
11704 if (!x86_64_immediate_operand (addr
, VOIDmode
))
11707 /* Constant addresses are sign extended to 64bit, we have to
11708 prevent addresses from 0x80000000 to 0xffffffff in x32 mode. */
11710 && val_signbit_known_set_p (SImode
, INTVAL (addr
)))
11716 disp
= addr
; /* displacement */
11722 else if (GET_CODE (index
) == SUBREG
11723 && ix86_address_subreg_operand (SUBREG_REG (index
)))
11729 /* Address override works only on the (%reg) part of %fs:(%reg). */
11730 if (seg
!= SEG_DEFAULT
11731 && ((base
&& GET_MODE (base
) != word_mode
)
11732 || (index
&& GET_MODE (index
) != word_mode
)))
11735 /* Extract the integral value of scale. */
11738 if (!CONST_INT_P (scale_rtx
))
11740 scale
= INTVAL (scale_rtx
);
11743 base_reg
= base
&& GET_CODE (base
) == SUBREG
? SUBREG_REG (base
) : base
;
11744 index_reg
= index
&& GET_CODE (index
) == SUBREG
? SUBREG_REG (index
) : index
;
11746 /* Avoid useless 0 displacement. */
11747 if (disp
== const0_rtx
&& (base
|| index
))
11750 /* Allow arg pointer and stack pointer as index if there is not scaling. */
11751 if (base_reg
&& index_reg
&& scale
== 1
11752 && (index_reg
== arg_pointer_rtx
11753 || index_reg
== frame_pointer_rtx
11754 || (REG_P (index_reg
) && REGNO (index_reg
) == STACK_POINTER_REGNUM
)))
11757 tmp
= base
, base
= index
, index
= tmp
;
11758 tmp
= base_reg
, base_reg
= index_reg
, index_reg
= tmp
;
11761 /* Special case: %ebp cannot be encoded as a base without a displacement.
11765 && (base_reg
== hard_frame_pointer_rtx
11766 || base_reg
== frame_pointer_rtx
11767 || base_reg
== arg_pointer_rtx
11768 || (REG_P (base_reg
)
11769 && (REGNO (base_reg
) == HARD_FRAME_POINTER_REGNUM
11770 || REGNO (base_reg
) == R13_REG
))))
11773 /* Special case: on K6, [%esi] makes the instruction vector decoded.
11774 Avoid this by transforming to [%esi+0].
11775 Reload calls address legitimization without cfun defined, so we need
11776 to test cfun for being non-NULL. */
11777 if (TARGET_K6
&& cfun
&& optimize_function_for_speed_p (cfun
)
11778 && base_reg
&& !index_reg
&& !disp
11779 && REG_P (base_reg
) && REGNO (base_reg
) == SI_REG
)
11782 /* Special case: encode reg+reg instead of reg*2. */
11783 if (!base
&& index
&& scale
== 2)
11784 base
= index
, base_reg
= index_reg
, scale
= 1;
11786 /* Special case: scaling cannot be encoded without base or displacement. */
11787 if (!base
&& !disp
&& index
&& scale
!= 1)
11791 out
->index
= index
;
11793 out
->scale
= scale
;
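/* Worked example (illustrative): the AT&T operand "4(%ebx,%ecx,2)" arrives
   here as
     (plus (plus (mult (reg %ecx) (const_int 2)) (reg %ebx)) (const_int 4))
   and decomposes into out->base = %ebx, out->index = %ecx, out->scale = 2,
   out->disp = (const_int 4) and out->seg = SEG_DEFAULT, returning 1.  */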
/* Return the cost of the memory address X.
   For i386, it is better to use a complex address than let gcc copy
   the address into a reg and make a new pseudo.  But not if the address
   requires two regs - that would mean more pseudos with longer
   lifetimes.  */

static int
ix86_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  struct ix86_address parts;
  int cost = 1;
  int ok = ix86_decompose_address (x, &parts);

  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Attempt to minimize the number of registers in the address.  */
  if ((parts.base
       && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
      || (parts.index
	  && (!REG_P (parts.index)
	      || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
    cost++;

  if (parts.base
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
      && parts.index
      && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
      && parts.base != parts.index)
    cost++;

  /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoded.  Increase the cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
     to split such addresses or even refuse such addresses at all.

     The following addressing modes are affected:
      [base+scale*index]
      [scale*index+disp]
      [base+index]

     The first and last case may be avoidable by explicitly coding the zero in
     the memory address, but I don't have an AMD-K6 machine handy to check
     this theory.  */

  if (TARGET_K6
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
	  || (parts.disp && !parts.base && parts.index && parts.scale != 1)
	  || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
    cost += 10;

  return cost;
}
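/* Rough examples (illustrative, assuming the base cost of 1 used above):
   an address built only from hard registers, e.g. (%esp), costs 1; one
   pseudo register in the address raises it to 2; two distinct pseudos in
   a base+index form raise it to 3, steering gcc away from two-register
   addresses whose pseudos would get long lifetimes.  */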
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
   this is used to form addresses to local data when -fPIC is in
   effect.  */

static bool
darwin_local_data_pic (rtx disp)
{
  return (GET_CODE (disp) == UNSPEC
	  && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
}
/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

static bool
ix86_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (!CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      if (TARGET_MACHO && darwin_local_data_pic (x))
	return true;

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	switch (XINT (x, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	  case UNSPEC_NTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_DTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
	  default:
	    return false;
	  }

      /* We must have drilled down to a symbol.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;
      if (GET_CODE (x) != SYMBOL_REF)
	return false;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return false;

      /* DLLIMPORT symbols are never valid.  */
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
	  && SYMBOL_REF_DLLIMPORT_P (x))
	return false;

#if TARGET_MACHO
      /* mdynamic-no-pic */
      if (MACHO_DYNAMIC_NO_PIC_P)
	return machopic_symbol_defined_p (x);
#endif
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == TImode
	  && x != CONST0_RTX (TImode)
	  && !TARGET_64BIT)
	return false;
      break;

    case CONST_VECTOR:
      if (!standard_sse_constant_p (x))
	return false;
      break;

    default:
      break;
    }

  /* Otherwise we handle everything else in the move patterns.  */
  return true;
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible for the address of thread-local symbols, which
   is checked above.  */

static bool
ix86_cannot_force_const_mem (enum machine_mode mode, rtx x)
{
  /* We can always put integral constants and vectors in memory.  */
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return false;

    default:
      break;
    }
  return !ix86_legitimate_constant_p (mode, x);
}
/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  rtx inner;

  switch (GET_CODE (x))
    {
    case CONST:
      inner = XEXP (x, 0);
      if (GET_CODE (inner) == PLUS
	  && CONST_INT_P (XEXP (inner, 1)))
	inner = XEXP (inner, 0);

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (inner) == UNSPEC)
	switch (XINT (inner, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	    x = XVECEXP (inner, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_MACHOPIC_OFFSET:
	    return legitimate_pic_address_disp_p (x);
	  default:
	    return false;
	  }
      /* FALLTHRU */

    case SYMBOL_REF:
    case LABEL_REF:
      return legitimate_pic_address_disp_p (x);

    default:
      return true;
    }
}
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

bool
legitimate_pic_address_disp_p (rtx disp)
{
  bool saw_plus;

  /* In 64bit mode we can allow direct addresses of symbols and labels
     when they are not dynamic symbols.  */
  if (TARGET_64BIT)
    {
      rtx op0 = disp, op1;

      switch (GET_CODE (disp))
	{
	case LABEL_REF:
	  return true;

	case CONST:
	  if (GET_CODE (XEXP (disp, 0)) != PLUS)
	    break;
	  op0 = XEXP (XEXP (disp, 0), 0);
	  op1 = XEXP (XEXP (disp, 0), 1);
	  if (!CONST_INT_P (op1)
	      || INTVAL (op1) >= 16*1024*1024
	      || INTVAL (op1) < -16*1024*1024)
	    break;
	  if (GET_CODE (op0) == LABEL_REF)
	    return true;
	  if (GET_CODE (op0) == CONST
	      && GET_CODE (XEXP (op0, 0)) == UNSPEC
	      && XINT (XEXP (op0, 0), 1) == UNSPEC_PCREL)
	    return true;
	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_PCREL)
	    return true;
	  if (GET_CODE (op0) != SYMBOL_REF)
	    break;
	  /* FALLTHRU */

	case SYMBOL_REF:
	  /* TLS references should always be enclosed in UNSPEC.  */
	  if (SYMBOL_REF_TLS_MODEL (op0))
	    return false;
	  if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
	      && ix86_cmodel != CM_LARGE_PIC)
	    return true;
	  break;

	default:
	  break;
	}
    }
  if (GET_CODE (disp) != CONST)
    return false;
  disp = XEXP (disp, 0);

  if (TARGET_64BIT)
    {
      /* Allowing PLUS expressions here is unsafe; they limited the allowed
	 distance of GOT tables.  We should not need these anyway.  */
      if (GET_CODE (disp) != UNSPEC
	  || (XINT (disp, 1) != UNSPEC_GOTPCREL
	      && XINT (disp, 1) != UNSPEC_GOTOFF
	      && XINT (disp, 1) != UNSPEC_PCREL
	      && XINT (disp, 1) != UNSPEC_PLTOFF))
	return false;

      if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
	  && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
	return false;
      return true;
    }

  saw_plus = false;
  if (GET_CODE (disp) == PLUS)
    {
      if (!CONST_INT_P (XEXP (disp, 1)))
	return false;
      disp = XEXP (disp, 0);
      saw_plus = true;
    }

  if (TARGET_MACHO && darwin_local_data_pic (disp))
    return true;

  if (GET_CODE (disp) != UNSPEC)
    return false;

  switch (XINT (disp, 1))
    {
    case UNSPEC_GOT:
      if (saw_plus)
	return false;
      /* We need to check for both symbols and labels because VxWorks loads
	 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
	 details.  */
      return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	      || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
    case UNSPEC_GOTOFF:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
	 While the ABI also specifies a 32bit relocation, we don't produce
	 it in the small PIC model at all.  */
      if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	   || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
	  && !TARGET_64BIT)
	return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
      return false;
    case UNSPEC_GOTTPOFF:
    case UNSPEC_GOTNTPOFF:
    case UNSPEC_INDNTPOFF:
      if (saw_plus)
	return false;
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
    case UNSPEC_NTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
    case UNSPEC_DTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
    }

  return false;
}
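/* Illustrative shapes accepted above as 32-bit PIC displacements:
     (const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF))             x@GOTOFF
     (const (plus (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)
		  (const_int 4)))                                  x@GOTOFF+4
   TLS forms such as (const (unspec [(symbol_ref "v")] UNSPEC_NTPOFF)) are
   accepted only when the symbol's TLS model matches the unspec.  */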
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.  */

bool
ix86_legitimize_reload_address (rtx x,
				enum machine_mode mode ATTRIBUTE_UNUSED,
				int opnum, int type,
				int ind_levels ATTRIBUTE_UNUSED)
{
  /* Reload can generate:

     (plus:DI (plus:DI (unspec:DI [(const_int 0 [0])] UNSPEC_TP)
		       (reg:DI 97))
	      (reg:DI 2 cx))

     This RTX is rejected from ix86_legitimate_address_p due to
     non-strictness of base register 97.  Following this rejection,
     reload pushes all three components into separate registers,
     creating an invalid memory address RTX.

     The following code reloads only the invalid part of the
     memory address RTX.  */

  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 1))
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 1)))
    {
      rtx base, index;
      bool something_reloaded = false;

      base = XEXP (XEXP (x, 0), 1);
      if (!REG_OK_FOR_BASE_STRICT_P (base))
	{
	  push_reload (base, NULL_RTX, &XEXP (XEXP (x, 0), 1), NULL,
		       BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  something_reloaded = true;
	}

      index = XEXP (x, 1);
      if (!REG_OK_FOR_INDEX_STRICT_P (index))
	{
	  push_reload (index, NULL_RTX, &XEXP (x, 1), NULL,
		       INDEX_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  something_reloaded = true;
	}

      gcc_assert (something_reloaded);
      return true;
    }

  return false;
}
/* Recognizes RTL expressions that are valid memory addresses for an
   instruction.  The MODE argument is the machine mode for the MEM
   expression that wants to use this address.

   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
   convert common non-canonical forms to canonical form so that they will
   be recognized.  */

static bool
ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
			   rtx addr, bool strict)
{
  struct ix86_address parts;
  rtx base, index, disp;
  HOST_WIDE_INT scale;

  if (ix86_decompose_address (addr, &parts) <= 0)
    /* Decomposition failed.  */
    return false;

  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  /* Validate base register.  */
  if (base)
    {
      rtx reg;

      if (REG_P (base))
	reg = base;
      else if (GET_CODE (base) == SUBREG && REG_P (SUBREG_REG (base)))
	reg = SUBREG_REG (base);
      else
	/* Base is not a register.  */
	return false;

      if (GET_MODE (base) != SImode && GET_MODE (base) != DImode)
	return false;

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
	  || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
	/* Base is not valid.  */
	return false;
    }

  /* Validate index register.  */
  if (index)
    {
      rtx reg;

      if (REG_P (index))
	reg = index;
      else if (GET_CODE (index) == SUBREG && REG_P (SUBREG_REG (index)))
	reg = SUBREG_REG (index);
      else
	/* Index is not a register.  */
	return false;

      if (GET_MODE (index) != SImode && GET_MODE (index) != DImode)
	return false;

      if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
	  || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
	/* Index is not valid.  */
	return false;
    }

  /* Index and base should have the same mode.  */
  if (base && index
      && GET_MODE (base) != GET_MODE (index))
    return false;

  /* Validate scale factor.  */
  if (scale != 1)
    {
      if (!index)
	/* Scale without index.  */
	return false;

      if (scale != 2 && scale != 4 && scale != 8)
	/* Scale is not a valid multiplier.  */
	return false;
    }

  /* Validate displacement.  */
  if (disp)
    {
      if (GET_CODE (disp) == CONST
	  && GET_CODE (XEXP (disp, 0)) == UNSPEC
	  && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
	switch (XINT (XEXP (disp, 0), 1))
	  {
	  /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit
	     when used.  While the ABI also specifies 32bit relocations, we
	     don't produce them at all and use IP-relative addressing
	     instead.  */
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	    gcc_assert (flag_pic);
	    if (!TARGET_64BIT)
	      goto is_legitimate_pic;

	    /* 64bit address unspec.  */
	    return false;

	  case UNSPEC_GOTPCREL:
	  case UNSPEC_PCREL:
	    gcc_assert (flag_pic);
	    goto is_legitimate_pic;

	  case UNSPEC_GOTTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_INDNTPOFF:
	  case UNSPEC_NTPOFF:
	  case UNSPEC_DTPOFF:
	    break;

	  case UNSPEC_STACK_CHECK:
	    gcc_assert (flag_split_stack);
	    break;

	  default:
	    /* Invalid address unspec.  */
	    return false;
	  }

      else if (SYMBOLIC_CONST (disp)
	       && (flag_pic
		   || (TARGET_MACHO
#if TARGET_MACHO
		       && MACHOPIC_INDIRECT
		       && !machopic_operand_p (disp)
#endif
		       )))
	{

	is_legitimate_pic:
	  if (TARGET_64BIT && (index || base))
	    {
	      /* foo@dtpoff(%rX) is ok.  */
	      if (GET_CODE (disp) != CONST
		  || GET_CODE (XEXP (disp, 0)) != PLUS
		  || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
		  || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
		  || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
		      && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
		/* Non-constant pic memory reference.  */
		return false;
	    }
	  else if ((!TARGET_MACHO || flag_pic)
		   && ! legitimate_pic_address_disp_p (disp))
	    /* Displacement is an invalid pic construct.  */
	    return false;
#if TARGET_MACHO
	  else if (MACHO_DYNAMIC_NO_PIC_P
		   && !ix86_legitimate_constant_p (Pmode, disp))
	    /* displacement must be referenced via non_lazy_pointer */
	    return false;
#endif

	  /* This code used to verify that a symbolic pic displacement
	     includes the pic_offset_table_rtx register.

	     While this is a good idea, unfortunately these constructs may
	     be created by the "adds using lea" optimization for incorrect
	     code like:

	     int a;
	     int foo(int i)
	       {
		 return *(&a+i);
	       }

	     This code is nonsensical, but results in addressing the
	     GOT table with a pic_offset_table_rtx base.  We can't
	     just refuse it easily, since it gets matched by the
	     "addsi3" pattern, which later gets split to lea in the
	     case the output register differs from the input.  While this
	     can be handled by a separate addsi pattern for this case
	     that never results in lea, disabling this test seems to be
	     the easier and correct fix for the crash.  */
	}
      else if (GET_CODE (disp) != LABEL_REF
	       && !CONST_INT_P (disp)
	       && (GET_CODE (disp) != CONST
		   || !ix86_legitimate_constant_p (Pmode, disp))
	       && (GET_CODE (disp) != SYMBOL_REF
		   || !ix86_legitimate_constant_p (Pmode, disp)))
	/* Displacement is not constant.  */
	return false;
      else if (TARGET_64BIT
	       && !x86_64_immediate_operand (disp, VOIDmode))
	/* Displacement is out of range.  */
	return false;
    }

  /* Everything looks valid.  */
  return true;
}

/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
}
/* Return a unique alias set for the GOT.  */

static alias_set_type
ix86_GOT_alias_set (void)
{
  static alias_set_type set = -1;
  if (set == -1)
    set = new_alias_set ();
  return set;
}
/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is
      returned.

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_FLAG_LOCAL set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
   reg also appears in the address.  */

static rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  rtx addr = orig;
  rtx new_rtx = orig;

#if TARGET_MACHO
  if (TARGET_MACHO && !TARGET_64BIT)
    {
      if (reg == 0)
	reg = gen_reg_rtx (Pmode);
      /* Use the generic Mach-O PIC machinery.  */
      return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
    }
#endif

  if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
    new_rtx = addr;
  else if (TARGET_64BIT
	   && ix86_cmodel != CM_SMALL_PIC
	   && gotoff_operand (addr, Pmode))
    {
      rtx tmpreg;
      /* This symbol may be referenced via a displacement from the PIC
	 base address (@GOTOFF).  */

      if (reload_in_progress)
	df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
      if (GET_CODE (addr) == CONST)
	addr = XEXP (addr, 0);
      if (GET_CODE (addr) == PLUS)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
				    UNSPEC_GOTOFF);
	  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
	}
      else
	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
      new_rtx = gen_rtx_CONST (Pmode, new_rtx);
      if (!reg)
	tmpreg = gen_reg_rtx (Pmode);
      else
	tmpreg = reg;
      emit_move_insn (tmpreg, new_rtx);

      if (reg != 0)
	{
	  new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
					 tmpreg, 1, OPTAB_DIRECT);
	  new_rtx = reg;
	}
      else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
    }
  else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
    {
      /* This symbol may be referenced via a displacement from the PIC
	 base address (@GOTOFF).  */

      if (reload_in_progress)
	df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
      if (GET_CODE (addr) == CONST)
	addr = XEXP (addr, 0);
      if (GET_CODE (addr) == PLUS)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
				    UNSPEC_GOTOFF);
	  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
	}
      else
	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
      new_rtx = gen_rtx_CONST (Pmode, new_rtx);
      new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);

      if (reg != 0)
	{
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
  else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
	   /* We can't use @GOTOFF for text labels on VxWorks;
	      see gotoff_operand.  */
	   || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
    {
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
	{
	  if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
	    return legitimize_dllimport_symbol (addr, true);
	  if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
	      && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
	    {
	      rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
	      return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
	    }
	}

      /* For x64 PE-COFF there is no GOT table, so we use the address
	 directly.  */
      if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
      else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  /* Use gen_movsi directly; otherwise the address is loaded
	     into a register for CSE.  We don't want to CSE this address
	     itself; instead we CSE addresses from the GOT table, so skip
	     this.  */
	  emit_insn (gen_movsi (reg, new_rtx));
	  new_rtx = reg;
	}
      else
	{
	  /* This symbol must be referenced via a load from the
	     Global Offset Table (@GOT).  */

	  if (reload_in_progress)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  if (TARGET_64BIT)
	    new_rtx = force_reg (Pmode, new_rtx);
	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
  else
    {
      if (CONST_INT_P (addr)
	  && !x86_64_immediate_operand (addr, VOIDmode))
	{
	  if (reg)
	    {
	      emit_move_insn (reg, addr);
	      new_rtx = reg;
	    }
	  else
	    new_rtx = force_reg (Pmode, addr);
	}
      else if (GET_CODE (addr) == CONST)
	{
	  addr = XEXP (addr, 0);

	  /* We must match stuff we generate before.  Assume the only
	     unspecs that can get here are ours.  Not that we could do
	     anything with them anyway....  */
	  if (GET_CODE (addr) == UNSPEC
	      || (GET_CODE (addr) == PLUS
		  && GET_CODE (XEXP (addr, 0)) == UNSPEC))
	    return orig;
	  gcc_assert (GET_CODE (addr) == PLUS);
	}
      if (GET_CODE (addr) == PLUS)
	{
	  rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);

	  /* Check first to see if this is a constant offset from a @GOTOFF
	     symbol reference.  */
	  if (gotoff_operand (op0, Pmode)
	      && CONST_INT_P (op1))
	    {
	      if (!TARGET_64BIT)
		{
		  if (reload_in_progress)
		    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
		  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
					    UNSPEC_GOTOFF);
		  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
		  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
		  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);

		  if (reg != 0)
		    {
		      emit_move_insn (reg, new_rtx);
		      new_rtx = reg;
		    }
		}
	      else
		{
		  if (INTVAL (op1) < -16*1024*1024
		      || INTVAL (op1) >= 16*1024*1024)
		    {
		      if (!x86_64_immediate_operand (op1, Pmode))
			op1 = force_reg (Pmode, op1);
		      new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
		    }
		}
	    }
	  else
	    {
	      rtx base = legitimize_pic_address (op0, reg);
	      enum machine_mode mode = GET_MODE (base);
	      new_rtx
		= legitimize_pic_address (op1, base == reg ? NULL_RTX : reg);

	      if (CONST_INT_P (new_rtx))
		{
		  if (INTVAL (new_rtx) < -16*1024*1024
		      || INTVAL (new_rtx) >= 16*1024*1024)
		    {
		      if (!x86_64_immediate_operand (new_rtx, mode))
			new_rtx = force_reg (mode, new_rtx);
		      new_rtx
			= gen_rtx_PLUS (mode, force_reg (mode, base), new_rtx);
		    }
		  else
		    new_rtx = plus_constant (mode, base, INTVAL (new_rtx));
		}
	      else
		{
		  if (GET_CODE (new_rtx) == PLUS
		      && CONSTANT_P (XEXP (new_rtx, 1)))
		    {
		      base = gen_rtx_PLUS (mode, base, XEXP (new_rtx, 0));
		      new_rtx = XEXP (new_rtx, 1);
		    }
		  new_rtx = gen_rtx_PLUS (mode, base, new_rtx);
		}
	    }
	}
    }
  return new_rtx;
}
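/* Illustrative 32-bit output for the two reference types handled above,
   assuming the PIC register lives in %ebx ("foo" global, "bar" local):
       movl foo@GOT(%ebx), %eax      # load the address from the GOT
       leal bar@GOTOFF(%ebx), %eax   # compute an offset from the GOT base
 */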
/* Load the thread pointer.  If TO_REG is true, force it into a register.  */

static rtx
get_thread_pointer (enum machine_mode tp_mode, bool to_reg)
{
  rtx tp = gen_rtx_UNSPEC (ptr_mode, gen_rtvec (1, const0_rtx), UNSPEC_TP);

  if (GET_MODE (tp) != tp_mode)
    {
      gcc_assert (GET_MODE (tp) == SImode);
      gcc_assert (tp_mode == DImode);

      tp = gen_rtx_ZERO_EXTEND (tp_mode, tp);
    }

  if (to_reg)
    tp = copy_to_mode_reg (tp_mode, tp);

  return tp;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx ix86_tls_symbol;

static rtx
ix86_tls_get_addr (void)
{
  if (!ix86_tls_symbol)
    {
      const char *sym
	= ((TARGET_ANY_GNU_TLS && !TARGET_64BIT)
	   ? "___tls_get_addr" : "__tls_get_addr");

      ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, sym);
    }

  return ix86_tls_symbol;
}

/* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol.  */

static GTY(()) rtx ix86_tls_module_base_symbol;

static rtx
ix86_tls_module_base (void)
{
  if (!ix86_tls_module_base_symbol)
    {
      ix86_tls_module_base_symbol
	= gen_rtx_SYMBOL_REF (Pmode, "_TLS_MODULE_BASE_");

      SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
	|= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
    }

  return ix86_tls_module_base_symbol;
}
/* A subroutine of ix86_legitimize_address and ix86_expand_move.  FOR_MOV is
   false if we expect this to be used for a memory address and true if
   we expect to load the address into a register.  */

static rtx
legitimize_tls_address (rtx x, enum tls_model model, bool for_mov)
{
  rtx dest, base, off;
  rtx pic = NULL_RTX, tp = NULL_RTX;
  enum machine_mode tp_mode = Pmode;
  int type;

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      dest = gen_reg_rtx (Pmode);

      if (!TARGET_64BIT)
	{
	  if (flag_pic)
	    pic = pic_offset_table_rtx;
	  else
	    {
	      pic = gen_reg_rtx (Pmode);
	      emit_insn (gen_set_got (pic));
	    }
	}

      if (TARGET_GNU2_TLS)
	{
	  if (TARGET_64BIT)
	    emit_insn (gen_tls_dynamic_gnu2_64 (dest, x));
	  else
	    emit_insn (gen_tls_dynamic_gnu2_32 (dest, x, pic));

	  tp = get_thread_pointer (Pmode, true);
	  dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));

	  if (GET_MODE (x) != Pmode)
	    x = gen_rtx_ZERO_EXTEND (Pmode, x);

	  set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
	}
      else
	{
	  rtx caddr = ix86_tls_get_addr ();

	  if (TARGET_64BIT)
	    {
	      rtx rax = gen_rtx_REG (Pmode, AX_REG);
	      rtx insns;

	      start_sequence ();
	      emit_call_insn
		(ix86_gen_tls_global_dynamic_64 (rax, x, caddr));
	      insns = get_insns ();
	      end_sequence ();

	      if (GET_MODE (x) != Pmode)
		x = gen_rtx_ZERO_EXTEND (Pmode, x);

	      RTL_CONST_CALL_P (insns) = 1;
	      emit_libcall_block (insns, dest, rax, x);
	    }
	  else
	    emit_insn (gen_tls_global_dynamic_32 (dest, x, pic, caddr));
	}
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      base = gen_reg_rtx (Pmode);

      if (!TARGET_64BIT)
	{
	  if (flag_pic)
	    pic = pic_offset_table_rtx;
	  else
	    {
	      pic = gen_reg_rtx (Pmode);
	      emit_insn (gen_set_got (pic));
	    }
	}

      if (TARGET_GNU2_TLS)
	{
	  rtx tmp = ix86_tls_module_base ();

	  if (TARGET_64BIT)
	    emit_insn (gen_tls_dynamic_gnu2_64 (base, tmp));
	  else
	    emit_insn (gen_tls_dynamic_gnu2_32 (base, tmp, pic));

	  tp = get_thread_pointer (Pmode, true);
	  set_unique_reg_note (get_last_insn (), REG_EQUAL,
			       gen_rtx_MINUS (Pmode, tmp, tp));
	}
      else
	{
	  rtx caddr = ix86_tls_get_addr ();

	  if (TARGET_64BIT)
	    {
	      rtx rax = gen_rtx_REG (Pmode, AX_REG);
	      rtx insns, eqv;

	      start_sequence ();
	      emit_call_insn
		(ix86_gen_tls_local_dynamic_base_64 (rax, caddr));
	      insns = get_insns ();
	      end_sequence ();

	      /* Attach a unique REG_EQUAL, to allow the RTL optimizers to
		 share the LD_BASE result with other LD model accesses.  */
	      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				    UNSPEC_TLS_LD_BASE);

	      RTL_CONST_CALL_P (insns) = 1;
	      emit_libcall_block (insns, base, rax, eqv);
	    }
	  else
	    emit_insn (gen_tls_local_dynamic_base_32 (base, pic, caddr));
	}

      off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
      off = gen_rtx_CONST (Pmode, off);

      dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));

      if (TARGET_GNU2_TLS)
	{
	  dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));

	  if (GET_MODE (x) != Pmode)
	    x = gen_rtx_ZERO_EXTEND (Pmode, x);

	  set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
	}
      break;

    case TLS_MODEL_INITIAL_EXEC:
      if (TARGET_64BIT)
	{
	  if (TARGET_SUN_TLS && !TARGET_X32)
	    {
	      /* The Sun linker took the AMD64 TLS spec literally
		 and can only handle %rax as the destination of the
		 initial executable code sequence.  */

	      dest = gen_reg_rtx (DImode);
	      emit_insn (gen_tls_initial_exec_64_sun (dest, x));
	      return dest;
	    }

	  /* Generate DImode references to avoid %fs:(%reg32)
	     problems and the linker IE->LE relaxation bug.  */
	  tp_mode = DImode;
	  pic = NULL;
	  type = UNSPEC_GOTNTPOFF;
	}
      else if (flag_pic)
	{
	  if (reload_in_progress)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
	  pic = pic_offset_table_rtx;
	  type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
	}
      else if (!TARGET_ANY_GNU_TLS)
	{
	  pic = gen_reg_rtx (Pmode);
	  emit_insn (gen_set_got (pic));
	  type = UNSPEC_GOTTPOFF;
	}
      else
	{
	  pic = NULL;
	  type = UNSPEC_INDNTPOFF;
	}

      off = gen_rtx_UNSPEC (tp_mode, gen_rtvec (1, x), type);
      off = gen_rtx_CONST (tp_mode, off);
      if (pic)
	off = gen_rtx_PLUS (tp_mode, pic, off);
      off = gen_const_mem (tp_mode, off);
      set_mem_alias_set (off, ix86_GOT_alias_set ());

      if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
	{
	  base = get_thread_pointer (tp_mode,
				     for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
	  off = force_reg (tp_mode, off);
	  return gen_rtx_PLUS (tp_mode, base, off);
	}
      else
	{
	  base = get_thread_pointer (Pmode, true);
	  dest = gen_reg_rtx (Pmode);
	  emit_insn (ix86_gen_sub3 (dest, base, off));
	}
      break;

    case TLS_MODEL_LOCAL_EXEC:
      off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
			    (TARGET_64BIT || TARGET_ANY_GNU_TLS)
			    ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
      off = gen_rtx_CONST (Pmode, off);

      if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
	{
	  base = get_thread_pointer (Pmode,
				     for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
	  return gen_rtx_PLUS (Pmode, base, off);
	}
      else
	{
	  base = get_thread_pointer (Pmode, true);
	  dest = gen_reg_rtx (Pmode);
	  emit_insn (ix86_gen_sub3 (dest, base, off));
	}
      break;

    default:
      gcc_unreachable ();
    }

  return dest;
}
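/* Illustrative asm for two of the models above under the GNU TLS dialect
   (the exact sequence depends on target and options; "x" is a made-up
   variable name).  Local-exec, address wanted in a register:
       movq %fs:0, %rax
       leaq x@tpoff(%rax), %rax
   Initial-exec, offset loaded from the GOT:
       movq x@gottpoff(%rip), %rax
       movq %fs:(%rax), %rdx  */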
/* Create or return the unique __imp_DECL dllimport symbol corresponding
   to symbol DECL.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
  htab_t dllimport_map;

static tree
get_dllimport_decl (tree decl)
{
  struct tree_map *h, in;
  void **loc;
  const char *name;
  const char *prefix;
  size_t namelen, prefixlen;
  char *imp_name;
  tree to;
  rtx rtl;

  if (!dllimport_map)
    dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);

  in.hash = htab_hash_pointer (decl);
  in.base.from = decl;
  loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
  h = (struct tree_map *) *loc;
  if (h)
    return h->to;

  *loc = h = ggc_alloc_tree_map ();
  h->hash = in.hash;
  h->base.from = decl;
  h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
			   VAR_DECL, NULL, ptr_type_node);
  DECL_ARTIFICIAL (to) = 1;
  DECL_IGNORED_P (to) = 1;
  DECL_EXTERNAL (to) = 1;
  TREE_READONLY (to) = 1;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = targetm.strip_name_encoding (name);
  prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
    ? "*__imp_" : "*__imp__";
  namelen = strlen (name);
  prefixlen = strlen (prefix);
  imp_name = (char *) alloca (namelen + prefixlen + 1);
  memcpy (imp_name, prefix, prefixlen);
  memcpy (imp_name + prefixlen, name, namelen + 1);

  name = ggc_alloc_string (imp_name, namelen + prefixlen);
  rtl = gen_rtx_SYMBOL_REF (Pmode, name);
  SET_SYMBOL_REF_DECL (rtl, to);
  SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;

  rtl = gen_const_mem (Pmode, rtl);
  set_mem_alias_set (rtl, ix86_GOT_alias_set ());

  SET_DECL_RTL (to, rtl);
  SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));

  return to;
}
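/* Example of the naming scheme above (illustrative): with the 32-bit "_"
   user label prefix, a dllimported "foo" gets the companion symbol
   "__imp__foo"; with an empty prefix (e.g. x86-64) it is "__imp_foo".
   The leading '*' in the prefix string suppresses further decoration,
   and references are then loads through this import-table pointer.  */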
/* Expand SYMBOL into its corresponding dllimport symbol.  WANT_REG is
   true if we require the result be a register.  */

static rtx
legitimize_dllimport_symbol (rtx symbol, bool want_reg)
{
  tree imp_decl;
  rtx x;

  gcc_assert (SYMBOL_REF_DECL (symbol));
  imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));

  x = DECL_RTL (imp_decl);
  if (want_reg)
    x = force_reg (Pmode, x);
  return x;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the 80386, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in a general reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in a general reg.

   When -fpic is used, special handling is needed for symbolic references.
   See comments by legitimize_pic_address in i386.c for details.  */

static rtx
ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  int changed = 0;
  unsigned log;

  log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
  if (log)
    return legitimize_tls_address (x, (enum tls_model) log, false);
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
    {
      rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
				      (enum tls_model) log, false);
      return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
    }

  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
    {
      if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
	return legitimize_dllimport_symbol (x, true);
      if (GET_CODE (x) == CONST
	  && GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
	  && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
	{
	  rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
	  return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
	}
    }

  if (flag_pic && SYMBOLIC_CONST (x))
    return legitimize_pic_address (x, 0);

#if TARGET_MACHO
  if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
    return machopic_indirect_data_reference (x, 0);
#endif

  /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */
  if (GET_CODE (x) == ASHIFT
      && CONST_INT_P (XEXP (x, 1))
      && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
    {
      changed = 1;
      log = INTVAL (XEXP (x, 1));
      x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
			GEN_INT (1 << log));
    }

  if (GET_CODE (x) == PLUS)
    {
      /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */

      if (GET_CODE (XEXP (x, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
	{
	  changed = 1;
	  log = INTVAL (XEXP (XEXP (x, 0), 1));
	  XEXP (x, 0) = gen_rtx_MULT (Pmode,
				      force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
				      GEN_INT (1 << log));
	}

      if (GET_CODE (XEXP (x, 1)) == ASHIFT
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
	{
	  changed = 1;
	  log = INTVAL (XEXP (XEXP (x, 1), 1));
	  XEXP (x, 1) = gen_rtx_MULT (Pmode,
				      force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
				      GEN_INT (1 << log));
	}

      /* Put multiply first if it isn't already.  */
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx tmp = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tmp;
	  changed = 1;
	}

      /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
	 into (plus (plus (mult (reg) (const)) (reg)) (const)).  This can be
	 created by virtual register instantiation, register elimination, and
	 similar optimizations.  */
      if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
	{
	  changed = 1;
	  x = gen_rtx_PLUS (Pmode,
			    gen_rtx_PLUS (Pmode, XEXP (x, 0),
					  XEXP (XEXP (x, 1), 0)),
			    XEXP (XEXP (x, 1), 1));
	}

      /* Canonicalize
	 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
	 into (plus (plus (mult (reg) (const)) (reg)) (const)).  */
      else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
	       && CONSTANT_P (XEXP (x, 1)))
	{
	  rtx constant;
	  rtx other = NULL_RTX;

	  if (CONST_INT_P (XEXP (x, 1)))
	    {
	      constant = XEXP (x, 1);
	      other = XEXP (XEXP (XEXP (x, 0), 1), 1);
	    }
	  else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
	    {
	      constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
	      other = XEXP (x, 1);
	    }
	  else
	    constant = 0;

	  if (constant)
	    {
	      changed = 1;
	      x = gen_rtx_PLUS (Pmode,
				gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (XEXP (x, 0), 1), 0)),
				plus_constant (Pmode, other,
					       INTVAL (constant)));
	    }
	}

      if (changed && ix86_legitimate_address_p (mode, x, false))
	return x;

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  changed = 1;
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}

      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  changed = 1;
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}

      if (changed
	  && REG_P (XEXP (x, 1))
	  && REG_P (XEXP (x, 0)))
	return x;

      if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
	{
	  changed = 1;
	  x = legitimize_pic_address (x, 0);
	}

      if (changed && ix86_legitimate_address_p (mode, x, false))
	return x;

      if (REG_P (XEXP (x, 0)))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val  = force_operand (XEXP (x, 1), temp);
	  if (val != temp)
	    {
	      if (GET_MODE (val) != Pmode)
		val = convert_to_mode (Pmode, val, 1);
	      emit_move_insn (temp, val);
	    }

	  XEXP (x, 1) = temp;
	  return x;
	}

      else if (REG_P (XEXP (x, 1)))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val  = force_operand (XEXP (x, 0), temp);
	  if (val != temp)
	    {
	      if (GET_MODE (val) != Pmode)
		val = convert_to_mode (Pmode, val, 1);
	      emit_move_insn (temp, val);
	    }

	  XEXP (x, 0) = temp;
	  return x;
	}
    }

  return x;
}
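/* Illustrative canonicalization performed above: an address computed as
     (plus (ashift (reg) (const_int 2)) (reg))
   is rewritten to
     (plus (mult (reg) (const_int 4)) (reg))
   which is the scaled-index form that ix86_decompose_address and
   ix86_legitimate_address_p expect.  */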
/* Print an integer constant expression in assembler syntax.  Addition
   and subtraction are the only arithmetic that may appear in these
   expressions.  FILE is the stdio stream to write to, X is the rtx, and
   CODE is the operand print code from the output string.  */

static void
output_pic_addr_const (FILE *file, rtx x, int code)
{
  char buf[256];

  switch (GET_CODE (x))
    {
    case PC:
      gcc_assert (flag_pic);
      putc ('.', file);
      break;

    case SYMBOL_REF:
      if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
	output_addr_const (file, x);
      else
	{
	  const char *name = XSTR (x, 0);

	  /* Mark the decl as referenced so that cgraph will
	     output the function.  */
	  if (SYMBOL_REF_DECL (x))
	    mark_decl_referenced (SYMBOL_REF_DECL (x));

#if TARGET_MACHO
	  if (MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
	  && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
	fputs ("@PLT", file);
      break;

    case LABEL_REF:
      x = XEXP (x, 0);
      /* FALLTHRU */
    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
      assemble_name (asm_out_file, buf);
      break;

    case CONST_INT:
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;

    case CONST:
      /* This used to output parentheses around the expression,
	 but that does not work on the 386 (either ATT or BSD assembler).  */
      output_pic_addr_const (file, XEXP (x, 0), code);
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	{
	  /* We can use %d if the number is <32 bits and positive.  */
	  if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
	    fprintf (file, "0x%lx%08lx",
		     (unsigned long) CONST_DOUBLE_HIGH (x),
		     (unsigned long) CONST_DOUBLE_LOW (x));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
	}
      else
	/* We can't handle floating point constants;
	   TARGET_PRINT_OPERAND must handle them.  */
	output_operand_lossage ("floating constant misused");
      break;

    case PLUS:
      /* Some assemblers need integer constants to appear first.  */
      if (CONST_INT_P (XEXP (x, 0)))
	{
	  output_pic_addr_const (file, XEXP (x, 0), code);
	  putc ('+', file);
	  output_pic_addr_const (file, XEXP (x, 1), code);
	}
      else
	{
	  gcc_assert (CONST_INT_P (XEXP (x, 1)));
	  output_pic_addr_const (file, XEXP (x, 1), code);
	  putc ('+', file);
	  output_pic_addr_const (file, XEXP (x, 0), code);
	}
      break;

    case MINUS:
      if (!TARGET_MACHO)
	putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
      output_pic_addr_const (file, XEXP (x, 0), code);
      putc ('-', file);
      output_pic_addr_const (file, XEXP (x, 1), code);
      if (!TARGET_MACHO)
	putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
      break;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_STACK_CHECK)
	{
	  bool f = i386_asm_output_addr_const_extra (file, x);
	  gcc_assert (f);
	  break;
	}

      gcc_assert (XVECLEN (x, 0) == 1);
      output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
      switch (XINT (x, 1))
	{
	case UNSPEC_GOT:
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLTOFF:
	  fputs ("@PLTOFF", file);
	  break;
	case UNSPEC_PCREL:
	  fputs (ASSEMBLER_DIALECT == ASM_ATT ?
		 "(%rip)" : "[rip]", file);
	  break;
	case UNSPEC_GOTPCREL:
	  fputs (ASSEMBLER_DIALECT == ASM_ATT ?
		 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
	  break;
	case UNSPEC_GOTTPOFF:
	  /* FIXME: This might be @TPOFF in Sun ld too.  */
	  fputs ("@gottpoff", file);
	  break;
	case UNSPEC_TPOFF:
	  fputs ("@tpoff", file);
	  break;
	case UNSPEC_NTPOFF:
	  if (TARGET_64BIT)
	    fputs ("@tpoff", file);
	  else
	    fputs ("@ntpoff", file);
	  break;
	case UNSPEC_DTPOFF:
	  fputs ("@dtpoff", file);
	  break;
	case UNSPEC_GOTNTPOFF:
	  if (TARGET_64BIT)
	    fputs (ASSEMBLER_DIALECT == ASM_ATT ?
		   "@gottpoff(%rip)": "@gottpoff[rip]", file);
	  else
	    fputs ("@gotntpoff", file);
	  break;
	case UNSPEC_INDNTPOFF:
	  fputs ("@indntpoff", file);
	  break;
#if TARGET_MACHO
	case UNSPEC_MACHOPIC_OFFSET:
	  putc ('-', file);
	  machopic_output_function_base_name (file);
	  break;
#endif
	default:
	  output_operand_lossage ("invalid UNSPEC as operand");
	  break;
	}
      break;

    default:
      output_operand_lossage ("invalid expression as operand");
    }
}
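/* Example output (illustrative): for
     (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))
   this routine prints "foo@GOTOFF"; for an UNSPEC_GOTPCREL reference in
   64-bit AT&T syntax it prints "foo@GOTPCREL(%rip)".  */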
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void ATTRIBUTE_UNUSED
i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  fputs (ASM_LONG, file);
  output_addr_const (file, x);
  fputs ("@dtpoff", file);
  switch (size)
    {
    case 4:
      break;
    case 8:
      fputs (", 0", file);
      break;
    default:
      gcc_unreachable ();
    }
}
/* Return true if X is a representation of the PIC register.  This copes
   with calls from ix86_find_base_term, where the register might have
   been replaced by a cselib value.  */

static bool
ix86_pic_register_p (rtx x)
{
  if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
    return (pic_offset_table_rtx
	    && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
  else
    return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
}
/* Helper function for ix86_delegitimize_address.
   Attempt to delegitimize TLS local-exec accesses.  */

static rtx
ix86_delegitimize_tls_address (rtx orig_x)
{
  rtx x = orig_x, unspec;
  struct ix86_address addr;

  if (!TARGET_TLS_DIRECT_SEG_REFS)
    return orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;
  if (ix86_decompose_address (x, &addr) == 0
      || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
      || addr.disp == NULL_RTX
      || GET_CODE (addr.disp) != CONST)
    return orig_x;
  unspec = XEXP (addr.disp, 0);
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
    return orig_x;
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  if (unspec != XEXP (addr.disp, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize PIC+GOTOFF and turn it back
   into a direct symbol reference.

   On Darwin, this is necessary to avoid a crash, because Darwin
   has a different PIC label for each routine but the DWARF debugging
   information is not associated with any particular routine, so it's
   necessary to remove references to the PIC label from RTL stored by
   the DWARF output code.  */

static rtx
ix86_delegitimize_address (rtx x)
{
  rtx orig_x = delegitimize_mem_from_attrs (x);
  /* addend is NULL or some rtx if x is something+GOTOFF where
     something doesn't include the PIC register.  */
  rtx addend = NULL_RTX;
  /* reg_addend is NULL or a multiple of some register.  */
  rtx reg_addend = NULL_RTX;
  /* const_addend is NULL or a const_int.  */
  rtx const_addend = NULL_RTX;
  /* This is the result, or NULL.  */
  rtx result = NULL_RTX;

  x = orig_x;

  if (MEM_P (x))
    x = XEXP (x, 0);

  if (TARGET_64BIT)
    {
      if (GET_CODE (x) == CONST
	  && GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_MODE (XEXP (x, 0)) == Pmode
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == UNSPEC
	  && XINT (XEXP (XEXP (x, 0), 0), 1) == UNSPEC_PCREL)
	{
	  rtx x2 = XVECEXP (XEXP (XEXP (x, 0), 0), 0, 0);
	  x = gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 1), x2);
	  if (MEM_P (orig_x))
	    x = replace_equiv_address_nv (orig_x, x);
	  return x;
	}
      if (GET_CODE (x) != CONST
	  || GET_CODE (XEXP (x, 0)) != UNSPEC
	  || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
	      && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
	  || (!MEM_P (orig_x) && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL))
	return ix86_delegitimize_tls_address (orig_x);
      x = XVECEXP (XEXP (x, 0), 0, 0);
      if (GET_MODE (orig_x) != GET_MODE (x) && MEM_P (orig_x))
	{
	  x = simplify_gen_subreg (GET_MODE (orig_x), x,
				   GET_MODE (x), 0);
	  if (x == NULL_RTX)
	    return orig_x;
	}
      return x;
    }

  if (GET_CODE (x) != PLUS
      || GET_CODE (XEXP (x, 1)) != CONST)
    return ix86_delegitimize_tls_address (orig_x);

  if (ix86_pic_register_p (XEXP (x, 0)))
    /* %ebx + GOT/GOTOFF */
    ;
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      /* %ebx + %reg * scale + GOT/GOTOFF */
      reg_addend = XEXP (x, 0);
      if (ix86_pic_register_p (XEXP (reg_addend, 0)))
	reg_addend = XEXP (reg_addend, 1);
      else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
	reg_addend = XEXP (reg_addend, 0);
      else
	{
	  reg_addend = NULL_RTX;
	  addend = XEXP (x, 0);
	}
    }
  else
    addend = XEXP (x, 0);

  x = XEXP (XEXP (x, 1), 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    {
      const_addend = XEXP (x, 1);
      x = XEXP (x, 0);
    }

  if (GET_CODE (x) == UNSPEC
      && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
	  || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
    result = XVECEXP (x, 0, 0);

  if (TARGET_MACHO && darwin_local_data_pic (x)
      && !MEM_P (orig_x))
    result = XVECEXP (x, 0, 0);

  if (! result)
    return ix86_delegitimize_tls_address (orig_x);

  if (const_addend)
    result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
  if (reg_addend)
    result = gen_rtx_PLUS (Pmode, reg_addend, result);
  if (addend)
    {
      /* If the rest of original X doesn't involve the PIC register, add
	 addend and subtract pic_offset_table_rtx.  This can happen e.g.
	 for code like:
	 leal (%ebx, %ecx, 4), %ecx
	 ...
	 movl foo@GOTOFF(%ecx), %edx
	 in which case we return (%ecx - %ebx) + foo.  */
      if (pic_offset_table_rtx)
	result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
						     pic_offset_table_rtx),
			       result);
      else
	return orig_x;
    }
  if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
    {
      result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
      if (result == NULL_RTX)
	return orig_x;
    }
  return result;
}
/* If X is a machine specific address (i.e. a symbol or label being
   referenced as a displacement from the GOT implemented using an
   UNSPEC), then return the base term.  Otherwise return X.  */

rtx
ix86_find_base_term (rtx x)
{
  rtx term;

  if (TARGET_64BIT)
    {
      if (GET_CODE (x) != CONST)
	return x;
      term = XEXP (x, 0);
      if (GET_CODE (term) == PLUS
	  && (CONST_INT_P (XEXP (term, 1))
	      || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
	term = XEXP (term, 0);
      if (GET_CODE (term) != UNSPEC
	  || (XINT (term, 1) != UNSPEC_GOTPCREL
	      && XINT (term, 1) != UNSPEC_PCREL))
	return x;

      return XVECEXP (term, 0, 0);
    }

  return ix86_delegitimize_address (x);
}
void
put_condition_code (enum rtx_code code, enum machine_mode mode, bool reverse,
		    bool fp, FILE *file)
{
  const char *suffix;

  if (mode == CCFPmode || mode == CCFPUmode)
    {
      code = ix86_fp_compare_code_to_integer (code);
      mode = CCmode;
    }
  if (reverse)
    code = reverse_condition (code);

  switch (code)
    {
    case EQ:
      switch (mode)
	{
	case CCAmode:
	  suffix = "a";
	  break;
	case CCCmode:
	  suffix = "c";
	  break;
	case CCOmode:
	  suffix = "o";
	  break;
	case CCSmode:
	  suffix = "s";
	  break;
	default:
	  suffix = "e";
	}
      break;
    case NE:
      switch (mode)
	{
	case CCAmode:
	  suffix = "na";
	  break;
	case CCCmode:
	  suffix = "nc";
	  break;
	case CCOmode:
	  suffix = "no";
	  break;
	case CCSmode:
	  suffix = "ns";
	  break;
	default:
	  suffix = "ne";
	}
      break;
    case GT:
      gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
      suffix = "g";
      break;
    case GTU:
      /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
	 Those same assemblers have the same but opposite lossage on cmov.  */
      if (mode == CCmode)
	suffix = fp ? "nbe" : "a";
      else if (mode == CCCmode)
	suffix = "b";
      else
	gcc_unreachable ();
      break;
    case LT:
      switch (mode)
	{
	case CCNOmode:
	case CCGOCmode:
	  suffix = "s";
	  break;
	case CCmode:
	case CCGCmode:
	  suffix = "l";
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case LTU:
      gcc_assert (mode == CCmode || mode == CCCmode);
      suffix = "b";
      break;
    case GE:
      switch (mode)
	{
	case CCNOmode:
	case CCGOCmode:
	  suffix = "ns";
	  break;
	case CCmode:
	case CCGCmode:
	  suffix = "ge";
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case GEU:
      /* ??? As above.  */
      gcc_assert (mode == CCmode || mode == CCCmode);
      suffix = fp ? "nb" : "ae";
      break;
    case LE:
      gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
      suffix = "le";
      break;
    case LEU:
      /* ??? As above.  */
      if (mode == CCmode)
	suffix = "be";
      else if (mode == CCCmode)
	suffix = fp ? "nb" : "ae";
      else
	gcc_unreachable ();
      break;
    case UNORDERED:
      suffix = fp ? "u" : "p";
      break;
    case ORDERED:
      suffix = fp ? "nu" : "np";
      break;
    default:
      gcc_unreachable ();
    }
  fputs (suffix, file);
}
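/* Example (illustrative): an unsigned greater-than (GTU) in plain CCmode
   prints "a", producing e.g. "ja" or "seta"; in the floating-point cmov
   case the equivalent "nbe" spelling is used instead, per the assembler
   workaround noted above.  */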
/* Print the name of register X to FILE based on its machine mode and number.
   If CODE is 'w', pretend the mode is HImode.
   If CODE is 'b', pretend the mode is QImode.
   If CODE is 'k', pretend the mode is SImode.
   If CODE is 'q', pretend the mode is DImode.
   If CODE is 'x', pretend the mode is V4SFmode.
   If CODE is 't', pretend the mode is V8SFmode.
   If CODE is 'h', pretend the reg is the 'high' byte register.
   If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
   If CODE is 'd', duplicate the operand for AVX instruction.
 */

void
print_reg (rtx x, int code, FILE *file)
{
  const char *reg;
  unsigned int regno;
  bool duplicated = code == 'd' && TARGET_AVX;

  if (ASSEMBLER_DIALECT == ASM_ATT)
    putc ('%', file);

  if (x == pc_rtx)
    {
      gcc_assert (TARGET_64BIT);
      fputs ("rip", file);
      return;
    }

  regno = true_regnum (x);
  gcc_assert (regno != ARG_POINTER_REGNUM
	      && regno != FRAME_POINTER_REGNUM
	      && regno != FLAGS_REG
	      && regno != FPSR_REG
	      && regno != FPCR_REG);

  if (code == 'w' || MMX_REG_P (x))
    code = 2;
  else if (code == 'b')
    code = 1;
  else if (code == 'k')
    code = 4;
  else if (code == 'q')
    code = 8;
  else if (code == 'y')
    code = 3;
  else if (code == 'h')
    code = 0;
  else if (code == 'x')
    code = 16;
  else if (code == 't')
    code = 32;
  else
    code = GET_MODE_SIZE (GET_MODE (x));

  /* Irritatingly, AMD extended registers use a different naming convention
     from the normal registers: "r%d[bwd]"  */
  if (REX_INT_REGNO_P (regno))
    {
      gcc_assert (TARGET_64BIT);
      putc ('r', file);
      fprint_ul (file, regno - FIRST_REX_INT_REG + 8);
      switch (code)
	{
	case 0:
	  error ("extended registers have no high halves");
	  break;
	case 1:
	  putc ('b', file);
	  break;
	case 2:
	  putc ('w', file);
	  break;
	case 4:
	  break;
	case 8:
	  break;
	default:
	  error ("unsupported operand size for extended register");
	  break;
	}
      return;
    }

  switch (code)
    {
    case 3:
      if (STACK_TOP_P (x))
	{
	  fputs ("st(0)", file);
	  break;
	}
      /* FALLTHRU */
    case 8:
    case 4:
    case 12:
      if (! ANY_FP_REG_P (x))
	putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
      /* FALLTHRU */
    case 16:
    case 2:
    normal:
      reg = hi_reg_name[regno];
      break;
    case 1:
      if (regno >= ARRAY_SIZE (qi_reg_name))
	goto normal;
      reg = qi_reg_name[regno];
      break;
    case 0:
      if (regno >= ARRAY_SIZE (qi_high_reg_name))
	goto normal;
      reg = qi_high_reg_name[regno];
      break;
    case 32:
      if (SSE_REG_P (x))
	{
	  gcc_assert (!duplicated);
	  putc ('y', file);
	  fputs (hi_reg_name[regno] + 1, file);
	  return;
	}
      break;
    default:
      gcc_unreachable ();
    }

  fputs (reg, file);
  if (duplicated)
    {
      if (ASSEMBLER_DIALECT == ASM_ATT)
	fprintf (file, ", %%%s", reg);
      else
	fprintf (file, ", %s", reg);
    }
}
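/* Example (illustrative): for the AX register the size chosen above picks
   "al" (1 byte), "ax" (2 bytes), "eax" (4 bytes, 'e' prefixed) or "rax"
   (8 bytes, 'r' prefixed in 64-bit mode), with '%' prepended first in
   AT&T syntax.  */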
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_local_dynamic_base
   pattern.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  return NULL;
}
14009 /* Meaning of CODE:
14010 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
14011 C -- print opcode suffix for set/cmov insn.
14012 c -- like C, but print reversed condition
14013 F,f -- likewise, but for floating-point.
14014 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
14016 R -- print the prefix for register names.
14017 z -- print the opcode suffix for the size of the current operand.
14018 Z -- likewise, with special suffixes for x87 instructions.
14019 * -- print a star (in certain assembler syntax)
14020 A -- print an absolute memory reference.
14021 E -- print address with DImode register names if TARGET_64BIT.
14022 w -- print the operand as if it's a "word" (HImode) even if it isn't.
14023 s -- print a shift double count, followed by the assemblers argument
14025 b -- print the QImode name of the register for the indicated operand.
14026 %b0 would print %al if operands[0] is reg 0.
14027 w -- likewise, print the HImode name of the register.
14028 k -- likewise, print the SImode name of the register.
14029 q -- likewise, print the DImode name of the register.
14030 x -- likewise, print the V4SFmode name of the register.
14031 t -- likewise, print the V8SFmode name of the register.
14032 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
14033 y -- print "st(0)" instead of "st" as a register.
14034 d -- print duplicated register operand for AVX instruction.
14035 D -- print condition for SSE cmp instruction.
14036 P -- if PIC, print an @PLT suffix.
14037 p -- print raw symbol name.
14038 X -- don't print any sort of PIC '@' suffix for a symbol.
14039 & -- print some in-use local-dynamic symbol name.
14040 H -- print a memory address offset by 8; used for sse high-parts
14041 Y -- print condition for XOP pcom* instruction.
14042 + -- print a branch hint as 'cs' or 'ds' prefix
14043 ; -- print a semicolon (after prefixes due to bug in older gas).
14044 ~ -- print "i" if TARGET_AVX2, "f" otherwise.
14045 @ -- print a segment register of thread base pointer load
14046 ^ -- print addr32 prefix if TARGET_64BIT and Pmode != word_mode
14050 ix86_print_operand (FILE *file, rtx x, int code)
14057       switch (ASSEMBLER_DIALECT)
14064           /* Intel syntax.  For absolute addresses, registers should not
14065              be surrounded by braces.  */
14069               ix86_print_operand (file, x, 0);
14076           gcc_unreachable ();
14079           ix86_print_operand (file, x, 0);
14083           /* Wrap address in an UNSPEC to declare special handling.  */
14085             x = gen_rtx_UNSPEC (DImode, gen_rtvec (1, x), UNSPEC_LEA_ADDR);
14087           output_address (x);
14091           if (ASSEMBLER_DIALECT == ASM_ATT)
14096           if (ASSEMBLER_DIALECT == ASM_ATT)
14101           if (ASSEMBLER_DIALECT == ASM_ATT)
14106           if (ASSEMBLER_DIALECT == ASM_ATT)
14111           if (ASSEMBLER_DIALECT == ASM_ATT)
14116           if (ASSEMBLER_DIALECT == ASM_ATT)
14121 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14122           if (ASSEMBLER_DIALECT != ASM_ATT)
14125           switch (GET_MODE_SIZE (GET_MODE (x)))
14140               output_operand_lossage
14141                 ("invalid operand size for operand code 'O'");
14150           if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
14152               /* Opcodes don't get size suffixes if using Intel opcodes.  */
14153               if (ASSEMBLER_DIALECT == ASM_INTEL)
14156               switch (GET_MODE_SIZE (GET_MODE (x)))
14175                   output_operand_lossage
14176                     ("invalid operand size for operand code 'z'");
14181           if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14183               (0, "non-integer operand used with operand code 'z'");
14187           /* 387 opcodes don't get size suffixes if using Intel opcodes.  */
14188           if (ASSEMBLER_DIALECT == ASM_INTEL)
14191           if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
14193               switch (GET_MODE_SIZE (GET_MODE (x)))
14196 #ifdef HAVE_AS_IX86_FILDS
14206 #ifdef HAVE_AS_IX86_FILDQ
14209                   fputs ("ll", file);
14217           else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14219               /* 387 opcodes don't get size suffixes
14220                  if the operands are registers.  */
14221               if (STACK_REG_P (x))
14224               switch (GET_MODE_SIZE (GET_MODE (x)))
14245               output_operand_lossage
14246                 ("invalid operand type used with operand code 'Z'");
14250           output_operand_lossage
14251             ("invalid operand size for operand code 'Z'");
14269           if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
14271               ix86_print_operand (file, x, 0);
14272               fputs (", ", file);
14277           switch (GET_CODE (x))
14280               fputs ("neq", file);
14283               fputs ("eq", file);
14287               fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
14291               fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
14295               fputs ("le", file);
14299               fputs ("lt", file);
14302               fputs ("unord", file);
14305               fputs ("ord", file);
14308               fputs ("ueq", file);
14311               fputs ("nlt", file);
14314               fputs ("nle", file);
14317               fputs ("ule", file);
14320               fputs ("ult", file);
14323               fputs ("une", file);
14326               output_operand_lossage ("operand is not a condition code, "
14327                                       "invalid operand code 'Y'");
14333           /* Little bit of braindamage here.  The SSE compare instructions
14334              use completely different names for the comparisons than the
14335              fp conditional moves do.  */
14336           switch (GET_CODE (x))
14341               fputs ("eq_us", file);
14345               fputs ("eq", file);
14350               fputs ("nge", file);
14354               fputs ("lt", file);
14359               fputs ("ngt", file);
14363               fputs ("le", file);
14366               fputs ("unord", file);
14371               fputs ("neq_oq", file);
14375               fputs ("neq", file);
14380               fputs ("ge", file);
14384               fputs ("nlt", file);
14389               fputs ("gt", file);
14393               fputs ("nle", file);
14396               fputs ("ord", file);
14399               output_operand_lossage ("operand is not a condition code, "
14400                                       "invalid operand code 'D'");
14407 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14408           if (ASSEMBLER_DIALECT == ASM_ATT)
14414           if (!COMPARISON_P (x))
14416               output_operand_lossage ("operand is not a condition code, "
14417                                       "invalid operand code '%c'", code);
14420           put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)),
14421                               code == 'c' || code == 'f',
14422                               code == 'F' || code == 'f',
14427           if (!offsettable_memref_p (x))
14429               output_operand_lossage ("operand is not an offsettable memory "
14430                                       "reference, invalid operand code 'H'");
14433           /* It doesn't actually matter what mode we use here, as we're
14434              only going to use this for printing.  */
14435           x = adjust_address_nv (x, DImode, 8);
14439           gcc_assert (CONST_INT_P (x));
14441           if (INTVAL (x) & IX86_HLE_ACQUIRE)
14442 #ifdef HAVE_AS_IX86_HLE
14443             fputs ("xacquire ", file);
14445             fputs ("\n" ASM_BYTE "0xf2\n\t", file);
14447           else if (INTVAL (x) & IX86_HLE_RELEASE)
14448 #ifdef HAVE_AS_IX86_HLE
14449             fputs ("xrelease ", file);
14451             fputs ("\n" ASM_BYTE "0xf3\n\t", file);
14453           /* We do not want to print the value of the operand.  */
14457           if (ASSEMBLER_DIALECT == ASM_ATT)
14463             const char *name = get_some_local_dynamic_name ();
14465               output_operand_lossage ("'%%&' used without any "
14466                                       "local dynamic TLS references");
14468             assemble_name (file, name);
14477               || optimize_function_for_size_p (cfun)
14478               || !TARGET_BRANCH_PREDICTION_HINTS)
14481           x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
14484               int pred_val = INTVAL (XEXP (x, 0));
14486               if (pred_val < REG_BR_PROB_BASE * 45 / 100
14487                   || pred_val > REG_BR_PROB_BASE * 55 / 100)
14489                   bool taken = pred_val > REG_BR_PROB_BASE / 2;
14491                     = final_forward_branch_p (current_output_insn) == 0;
14493                   /* Emit hints only in the case default branch prediction
14494                      heuristics would fail.  */
14495                   if (taken != cputaken)
14497                       /* We use 3e (DS) prefix for taken branches and
14498                          2e (CS) prefix for not taken branches.  */
14500                         fputs ("ds ; ", file);
14502                         fputs ("cs ; ", file);
14510 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
14516           if (ASSEMBLER_DIALECT == ASM_ATT)
14519           /* The kernel uses a different segment register for performance
14520              reasons; a system call would not have to trash the userspace
14521              segment register, which would be expensive.  */
14522           if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
14523             fputs ("fs", file);
14525             fputs ("gs", file);
14529           putc (TARGET_AVX2 ? 'i' : 'f', file);
14533           if (TARGET_64BIT && Pmode != word_mode)
14534             fputs ("addr32 ", file);
14538           output_operand_lossage ("invalid operand code '%c'", code);
14543     print_reg (x, code, file);
14545   else if (MEM_P (x))
14547       /* No `byte ptr' prefix for call instructions or BLKmode operands.  */
14548       if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
14549           && GET_MODE (x) != BLKmode)
14552           switch (GET_MODE_SIZE (GET_MODE (x)))
14554             case 1: size = "BYTE"; break;
14555             case 2: size = "WORD"; break;
14556             case 4: size = "DWORD"; break;
14557             case 8: size = "QWORD"; break;
14558             case 12: size = "TBYTE"; break;
14560               if (GET_MODE (x) == XFmode)
14565             case 32: size = "YMMWORD"; break;
14567               gcc_unreachable ();
14570           /* Check for explicit size override (codes 'b', 'w', 'k',
14574           else if (code == 'w')
14576           else if (code == 'k')
14578           else if (code == 'q')
14580           else if (code == 'x')
14583           fputs (size, file);
14584           fputs (" PTR ", file);
14588       /* Avoid (%rip) for call operands.  */
14589       if (CONSTANT_ADDRESS_P (x) && code == 'P'
14590           && !CONST_INT_P (x))
14591         output_addr_const (file, x);
14592       else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14593         output_operand_lossage ("invalid constraints for operand");
14595         output_address (x);
14598   else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14603       REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14604       REAL_VALUE_TO_TARGET_SINGLE (r, l);
14606       if (ASSEMBLER_DIALECT == ASM_ATT)
14608       /* Sign extend 32bit SFmode immediate to 8 bytes.  */
14610         fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14612         fprintf (file, "0x%08x", (unsigned int) l);
14615   else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14620       REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14621       REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14623       if (ASSEMBLER_DIALECT == ASM_ATT)
14625       fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
14628   /* These float cases don't actually occur as immediate operands.  */
14629   else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
14633       real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14634       fputs (dstr, file);
14639       /* We have patterns that allow zero sets of memory, for instance.
14640          In 64-bit mode, we should probably support all 8-byte vectors,
14641          since we can in fact encode that into an immediate.  */
14642       if (GET_CODE (x) == CONST_VECTOR)
14644           gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14648       if (code != 'P' && code != 'p')
14650           if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14652               if (ASSEMBLER_DIALECT == ASM_ATT)
14655           else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14656                    || GET_CODE (x) == LABEL_REF)
14658               if (ASSEMBLER_DIALECT == ASM_ATT)
14661                 fputs ("OFFSET FLAT:", file);
14664       if (CONST_INT_P (x))
14665         fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14666       else if (flag_pic || MACHOPIC_INDIRECT)
14667         output_pic_addr_const (file, x, code);
14669         output_addr_const (file, x);
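/* Illustrative example, not from the original source: for a conditional jump
   whose REG_BR_PROB note says "taken" while the static forward-branch
   heuristic says "not taken", the '+' code above prefixes the insn with
   "ds ; " (byte 0x3e); the opposite disagreement emits "cs ; " (0x2e), and
   agreement emits nothing.  */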
14674 ix86_print_operand_punct_valid_p (unsigned char code)
14676   return (code == '@' || code == '*' || code == '+' || code == '&'
14677           || code == ';' || code == '~' || code == '^');
14680 /* Print a memory operand whose address is ADDR.  */
14683 ix86_print_operand_address (FILE *file, rtx addr)
14685   struct ix86_address parts;
14686   rtx base, index, disp;
14692   if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_VSIBADDR)
14694       ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
14695       gcc_assert (parts.index == NULL_RTX);
14696       parts.index = XVECEXP (addr, 0, 1);
14697       parts.scale = INTVAL (XVECEXP (addr, 0, 2));
14698       addr = XVECEXP (addr, 0, 0);
14701   else if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_LEA_ADDR)
14703       gcc_assert (TARGET_64BIT);
14704       ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
14708     ok = ix86_decompose_address (addr, &parts);
14713   index = parts.index;
14715   scale = parts.scale;
14723       if (ASSEMBLER_DIALECT == ASM_ATT)
14725       fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14728       gcc_unreachable ();
14731   /* Use one byte shorter RIP relative addressing for 64bit mode.  */
14732   if (TARGET_64BIT && !base && !index)
14736       if (GET_CODE (disp) == CONST
14737           && GET_CODE (XEXP (disp, 0)) == PLUS
14738           && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14739         symbol = XEXP (XEXP (disp, 0), 0);
14741       if (GET_CODE (symbol) == LABEL_REF
14742           || (GET_CODE (symbol) == SYMBOL_REF
14743               && SYMBOL_REF_TLS_MODEL (symbol) == 0))
14746   if (!base && !index)
14748       /* Displacement only requires special attention.  */
14750       if (CONST_INT_P (disp))
14752           if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14753             fputs ("ds:", file);
14754           fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14757         output_pic_addr_const (file, disp, 0);
14759         output_addr_const (file, disp);
14763       /* Print SImode register names to force addr32 prefix.  */
14764       if (SImode_address_operand (addr, VOIDmode))
14766 #ifdef ENABLE_CHECKING
14767           gcc_assert (TARGET_64BIT);
14768           switch (GET_CODE (addr))
14771               gcc_assert (GET_MODE (addr) == SImode);
14772               gcc_assert (GET_MODE (SUBREG_REG (addr)) == DImode);
14776               gcc_assert (GET_MODE (addr) == DImode);
14779               gcc_unreachable ();
14782           gcc_assert (!code);
14788           && CONST_INT_P (disp)
14789           && INTVAL (disp) < -16*1024*1024)
14791 /* X32 runs in 64-bit mode, where displacement, DISP, in
14792 address DISP(%r64), is encoded as 32-bit immediate sign-
14793 extended from 32-bit to 64-bit. For -0x40000300(%r64),
14794 address is %r64 + 0xffffffffbffffd00. When %r64 <
14795 0x40000300, like 0x37ffe064, address is 0xfffffffff7ffdd64,
14796 which is invalid for x32. The correct address is %r64
14797 - 0x40000300 == 0xf7ffdd64. To properly encode
14798 -0x40000300(%r64) for x32, we zero-extend negative
14799 displacement by forcing addr32 prefix which truncates
14800 0xfffffffff7ffdd64 to 0xf7ffdd64. In theory, we should
14801 zero-extend all negative displacements, including -1(%rsp).
14802 However, for small negative displacements, sign-extension
14803 won't cause overflow. We only zero-extend negative
14804    displacements if they are < -16*1024*1024, which is also used
14805 to check legitimate address displacements for PIC. */
14809       if (ASSEMBLER_DIALECT == ASM_ATT)
14814             output_pic_addr_const (file, disp, 0);
14815           else if (GET_CODE (disp) == LABEL_REF)
14816             output_asm_label (disp);
14818             output_addr_const (file, disp);
14823         print_reg (base, code, file);
14827           print_reg (index, vsib ? 0 : code, file);
14828           if (scale != 1 || vsib)
14829             fprintf (file, ",%d", scale);
14835       rtx offset = NULL_RTX;
14839           /* Pull out the offset of a symbol; print any symbol itself.  */
14840           if (GET_CODE (disp) == CONST
14841               && GET_CODE (XEXP (disp, 0)) == PLUS
14842               && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14844               offset = XEXP (XEXP (disp, 0), 1);
14845               disp = gen_rtx_CONST (VOIDmode,
14846                                     XEXP (XEXP (disp, 0), 0));
14850             output_pic_addr_const (file, disp, 0);
14851           else if (GET_CODE (disp) == LABEL_REF)
14852             output_asm_label (disp);
14853           else if (CONST_INT_P (disp))
14856             output_addr_const (file, disp);
14862           print_reg (base, code, file);
14865           if (INTVAL (offset) >= 0)
14867           fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14871             fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14878           print_reg (index, vsib ? 0 : code, file);
14879           if (scale != 1 || vsib)
14880             fprintf (file, "*%d", scale);
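/* Illustrative example, not from the original source: for base %rbx, index
   %rcx, scale 4 and displacement 8, the AT&T branch above prints
   "8(%rbx,%rcx,4)" while the Intel branch prints "[rbx+rcx*4+8]"; a bare
   constant displacement additionally gets "ds:" in Intel syntax.  */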
14887 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */
14890 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14894   if (GET_CODE (x) != UNSPEC)
14897   op = XVECEXP (x, 0, 0);
14898   switch (XINT (x, 1))
14900     case UNSPEC_GOTTPOFF:
14901       output_addr_const (file, op);
14902       /* FIXME: This might be @TPOFF in Sun ld.  */
14903       fputs ("@gottpoff", file);
14906       output_addr_const (file, op);
14907       fputs ("@tpoff", file);
14909     case UNSPEC_NTPOFF:
14910       output_addr_const (file, op);
14912         fputs ("@tpoff", file);
14914         fputs ("@ntpoff", file);
14916     case UNSPEC_DTPOFF:
14917       output_addr_const (file, op);
14918       fputs ("@dtpoff", file);
14920     case UNSPEC_GOTNTPOFF:
14921       output_addr_const (file, op);
14923         fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14924                "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14926         fputs ("@gotntpoff", file);
14928     case UNSPEC_INDNTPOFF:
14929       output_addr_const (file, op);
14930       fputs ("@indntpoff", file);
14933     case UNSPEC_MACHOPIC_OFFSET:
14934       output_addr_const (file, op);
14936       machopic_output_function_base_name (file);
14940     case UNSPEC_STACK_CHECK:
14944         gcc_assert (flag_split_stack);
14946 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14947         offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14949         gcc_unreachable ();
14952         fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14963 /* Split one or more double-mode RTL references into pairs of half-mode
14964    references.  The RTL can be REG, offsettable MEM, integer constant, or
14965    CONST_DOUBLE.  "operands" is a pointer to an array of double-mode RTLs to
14966    split and "num" is its length.  lo_half and hi_half are output arrays
14967    that parallel "operands".  */
14970 split_double_mode (enum machine_mode mode, rtx operands[],
14971                    int num, rtx lo_half[], rtx hi_half[])
14973   enum machine_mode half_mode;
14979       half_mode = DImode;
14982       half_mode = SImode;
14985       gcc_unreachable ();
14988   byte = GET_MODE_SIZE (half_mode);
14992       rtx op = operands[num];
14994       /* simplify_subreg refuses to split volatile memory addresses,
14995          but we still have to handle them.  */
14998           lo_half[num] = adjust_address (op, half_mode, 0);
14999           hi_half[num] = adjust_address (op, half_mode, byte);
15003           lo_half[num] = simplify_gen_subreg (half_mode, op,
15004                                               GET_MODE (op) == VOIDmode
15005                                               ? mode : GET_MODE (op), 0);
15006           hi_half[num] = simplify_gen_subreg (half_mode, op,
15007                                               GET_MODE (op) == VOIDmode
15008                                               ? mode : GET_MODE (op), byte);
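/* Hedged usage sketch, not from the original source (hypothetical variable
   names): splitting one DImode operand into SImode halves on a 32-bit
   target.

     rtx ops[1], lo[1], hi[1];
     ops[0] = src;                  (a DImode REG or offsettable MEM)
     split_double_mode (DImode, ops, 1, lo, hi);

   lo[0] and hi[0] then reference the low and high 32 bits of src.  */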
15013 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
15014    MINUS, MULT or DIV.  OPERANDS are the insn operands, where operands[3]
15015    is the expression of the binary operation.  The output may either be
15016    emitted here, or returned to the caller, like all output_* functions.
15018    There is no guarantee that the operands are the same mode, as they
15019    might be within FLOAT or FLOAT_EXTEND expressions.  */
15021 #ifndef SYSV386_COMPAT
15022 /* Set to 1 for compatibility with brain-damaged assemblers.  No-one
15023    wants to fix the assemblers because that causes incompatibility
15024    with gcc.  No-one wants to fix gcc because that causes
15025    incompatibility with assemblers...  You can use the option of
15026    -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way.  */
15027 #define SYSV386_COMPAT 1
15031 output_387_binary_op (rtx insn, rtx *operands)
15033   static char buf[40];
15036   int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
15038 #ifdef ENABLE_CHECKING
15039   /* Even if we do not want to check the inputs, this documents input
15040      constraints, which helps in understanding the following code.  */
15041   if (STACK_REG_P (operands[0])
15042       && ((REG_P (operands[1])
15043            && REGNO (operands[0]) == REGNO (operands[1])
15044            && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
15045           || (REG_P (operands[2])
15046               && REGNO (operands[0]) == REGNO (operands[2])
15047               && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
15048       && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
15051     gcc_assert (is_sse);
15054   switch (GET_CODE (operands[3]))
15057       if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
15058           || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
15066       if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
15067           || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
15075       if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
15076           || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
15084       if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
15085           || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
15093       gcc_unreachable ();
15100       strcpy (buf, ssep);
15101       if (GET_MODE (operands[0]) == SFmode)
15102         strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
15104         strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
15108       strcpy (buf, ssep + 1);
15109       if (GET_MODE (operands[0]) == SFmode)
15110         strcat (buf, "ss\t{%2, %0|%0, %2}");
15112         strcat (buf, "sd\t{%2, %0|%0, %2}");
15118   switch (GET_CODE (operands[3]))
15122       if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
15124           rtx temp = operands[2];
15125           operands[2] = operands[1];
15126           operands[1] = temp;
15129       /* know operands[0] == operands[1].  */
15131       if (MEM_P (operands[2]))
15137       if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
15139           if (STACK_TOP_P (operands[0]))
15140             /* How is it that we are storing to a dead operand[2]?
15141                Well, presumably operands[1] is dead too.  We can't
15142                store the result to st(0) as st(0) gets popped on this
15143                instruction.  Instead store to operands[2] (which I
15144                think has to be st(1)).  st(1) will be popped later.
15145                gcc <= 2.8.1 didn't have this check and generated
15146                assembly code that the Unixware assembler rejected.  */
15147             p = "p\t{%0, %2|%2, %0}";   /* st(1) = st(0) op st(1); pop */
15149             p = "p\t{%2, %0|%0, %2}";   /* st(r1) = st(r1) op st(0); pop */
15153       if (STACK_TOP_P (operands[0]))
15154         p = "\t{%y2, %0|%0, %y2}";      /* st(0) = st(0) op st(r2) */
15156         p = "\t{%2, %0|%0, %2}";        /* st(r1) = st(r1) op st(0) */
15161       if (MEM_P (operands[1]))
15167       if (MEM_P (operands[2]))
15173       if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
15176           /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
15177              derived assemblers, confusingly reverse the direction of
15178              the operation for fsub{r} and fdiv{r} when the
15179              destination register is not st(0).  The Intel assembler
15180              doesn't have this brain damage.  Read !SYSV386_COMPAT to
15181              figure out what the hardware really does.  */
15182           if (STACK_TOP_P (operands[0]))
15183             p = "{p\t%0, %2|rp\t%2, %0}";
15185             p = "{rp\t%2, %0|p\t%0, %2}";
15187           if (STACK_TOP_P (operands[0]))
15188             /* As above for fmul/fadd, we can't store to st(0).  */
15189             p = "rp\t{%0, %2|%2, %0}";  /* st(1) = st(0) op st(1); pop */
15191             p = "p\t{%2, %0|%0, %2}";   /* st(r1) = st(r1) op st(0); pop */
15196       if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
15199           if (STACK_TOP_P (operands[0]))
15200             p = "{rp\t%0, %1|p\t%1, %0}";
15202             p = "{p\t%1, %0|rp\t%0, %1}";
15204           if (STACK_TOP_P (operands[0]))
15205             p = "p\t{%0, %1|%1, %0}";   /* st(1) = st(1) op st(0); pop */
15207             p = "rp\t{%1, %0|%0, %1}";  /* st(r2) = st(0) op st(r2); pop */
15212       if (STACK_TOP_P (operands[0]))
15214           if (STACK_TOP_P (operands[1]))
15215             p = "\t{%y2, %0|%0, %y2}";  /* st(0) = st(0) op st(r2) */
15217             p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
15220       else if (STACK_TOP_P (operands[1]))
15223             p = "{\t%1, %0|r\t%0, %1}";
15225             p = "r\t{%1, %0|%0, %1}";   /* st(r2) = st(0) op st(r2) */
15231             p = "{r\t%2, %0|\t%0, %2}";
15233             p = "\t{%2, %0|%0, %2}";    /* st(r1) = st(r1) op st(0) */
15239       gcc_unreachable ();
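/* Illustrative example, not from the original source: for an SSE DFmode add
   when AVX is enabled, buf becomes "vaddsd\t{%2, %1, %0|%0, %1, %2}" (the
   three-operand form built from ssep); without AVX the ssep + 1 path drops
   the leading 'v' and yields the two-operand "addsd\t{%2, %0|%0, %2}".  */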
15246 /* Check if a 256bit AVX register is referenced inside of EXP.  */
15249 ix86_check_avx256_register (rtx *pexp, void *data ATTRIBUTE_UNUSED)
15253   if (GET_CODE (exp) == SUBREG)
15254     exp = SUBREG_REG (exp);
15257       && VALID_AVX256_REG_OR_OI_MODE (GET_MODE (exp)))
15263 /* Return needed mode for entity in optimize_mode_switching pass.  */
15266 ix86_avx_u128_mode_needed (rtx insn)
15272       /* Needed mode is set to AVX_U128_CLEAN if there are
15273          no 256bit modes used in function arguments.  */
15274       for (link = CALL_INSN_FUNCTION_USAGE (insn);
15276            link = XEXP (link, 1))
15278           if (GET_CODE (XEXP (link, 0)) == USE)
15280               rtx arg = XEXP (XEXP (link, 0), 0);
15282               if (ix86_check_avx256_register (&arg, NULL))
15283                 return AVX_U128_ANY;
15287       return AVX_U128_CLEAN;
15290   /* Require DIRTY mode if a 256bit AVX register is referenced.  Hardware
15291      changes state only when a 256bit register is written to, but we need
15292      to prevent the compiler from moving the optimal insertion point above
15293      an eventual read from a 256bit register.  */
15294   if (for_each_rtx (&PATTERN (insn), ix86_check_avx256_register, NULL))
15295     return AVX_U128_DIRTY;
15297   return AVX_U128_ANY;
15300 /* Return mode that i387 must be switched into
15301    prior to the execution of insn.  */
15304 ix86_i387_mode_needed (int entity, rtx insn)
15306   enum attr_i387_cw mode;
15308   /* The mode UNINITIALIZED is used to store control word after a
15309      function call or ASM pattern.  The mode ANY specifies that the
15310      function has no requirements on the control word and makes no
15311      changes in the bits we are interested in.  */
15314       || (NONJUMP_INSN_P (insn)
15315           && (asm_noperands (PATTERN (insn)) >= 0
15316               || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
15317     return I387_CW_UNINITIALIZED;
15319   if (recog_memoized (insn) < 0)
15320     return I387_CW_ANY;
15322   mode = get_attr_i387_cw (insn);
15327       if (mode == I387_CW_TRUNC)
15332       if (mode == I387_CW_FLOOR)
15337       if (mode == I387_CW_CEIL)
15342       if (mode == I387_CW_MASK_PM)
15347       gcc_unreachable ();
15350   return I387_CW_ANY;
15353 /* Return mode that entity must be switched into
15354    prior to the execution of insn.  */
15357 ix86_mode_needed (int entity, rtx insn)
15362       return ix86_avx_u128_mode_needed (insn);
15367       return ix86_i387_mode_needed (entity, insn);
15369   gcc_unreachable ();
15374 /* Check if a 256bit AVX register is referenced in stores.  */
15377 ix86_check_avx256_stores (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data)
15379   if (ix86_check_avx256_register (&dest, NULL))
15381       bool *used = (bool *) data;
15386 /* Calculate mode of upper 128bit AVX registers after the insn.  */
15389 ix86_avx_u128_mode_after (int mode, rtx insn)
15391   rtx pat = PATTERN (insn);
15393   if (vzeroupper_operation (pat, VOIDmode)
15394       || vzeroall_operation (pat, VOIDmode))
15395     return AVX_U128_CLEAN;
15397   /* We know that state is clean after CALL insn if there are no
15398      256bit registers used in the function return register.  */
15401       bool avx_reg256_found = false;
15402       note_stores (pat, ix86_check_avx256_stores, &avx_reg256_found);
15403       if (!avx_reg256_found)
15404         return AVX_U128_CLEAN;
15407   /* Otherwise, return current mode.  Remember that if insn
15408      references AVX 256bit registers, the mode was already changed
15409      to DIRTY from MODE_NEEDED.  */
15413 /* Return the mode that an insn results in.  */
15416 ix86_mode_after (int entity, int mode, rtx insn)
15421       return ix86_avx_u128_mode_after (mode, insn);
15428       gcc_unreachable ();
15433 ix86_avx_u128_mode_entry (void)
15437   /* Entry mode is set to AVX_U128_DIRTY if there are
15438      256bit modes used in function arguments.  */
15439   for (arg = DECL_ARGUMENTS (current_function_decl); arg;
15440        arg = TREE_CHAIN (arg))
15442       rtx incoming = DECL_INCOMING_RTL (arg);
15444       if (incoming && ix86_check_avx256_register (&incoming, NULL))
15445         return AVX_U128_DIRTY;
15448   return AVX_U128_CLEAN;
15451 /* Return a mode that ENTITY is assumed to be
15452    switched to at function entry.  */
15455 ix86_mode_entry (int entity)
15460       return ix86_avx_u128_mode_entry ();
15465       return I387_CW_ANY;
15467       gcc_unreachable ();
15472 ix86_avx_u128_mode_exit (void)
15474   rtx reg = crtl->return_rtx;
15476   /* Exit mode is set to AVX_U128_DIRTY if there are
15477      256bit modes used in the function return register.  */
15478   if (reg && ix86_check_avx256_register (&reg, NULL))
15479     return AVX_U128_DIRTY;
15481   return AVX_U128_CLEAN;
15484 /* Return a mode that ENTITY is assumed to be
15485    switched to at function exit.  */
15488 ix86_mode_exit (int entity)
15493       return ix86_avx_u128_mode_exit ();
15498       return I387_CW_ANY;
15500       gcc_unreachable ();
15504 /* Output code to initialize control word copies used by trunc?f?i and
15505    rounding patterns.  CURRENT_MODE is set to current control word,
15506    while NEW_MODE is set to new control word.  */
15509 emit_i387_cw_initialization (int mode)
15511   rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
15514   enum ix86_stack_slot slot;
15516   rtx reg = gen_reg_rtx (HImode);
15518   emit_insn (gen_x86_fnstcw_1 (stored_mode));
15519   emit_move_insn (reg, copy_rtx (stored_mode));
15521   if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
15522       || optimize_function_for_size_p (cfun))
15526         case I387_CW_TRUNC:
15527           /* round toward zero (truncate) */
15528           emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
15529           slot = SLOT_CW_TRUNC;
15532         case I387_CW_FLOOR:
15533           /* round down toward -oo */
15534           emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15535           emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
15536           slot = SLOT_CW_FLOOR;
15540           /* round up toward +oo */
15541           emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15542           emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
15543           slot = SLOT_CW_CEIL;
15546         case I387_CW_MASK_PM:
15547           /* mask precision exception for nearbyint() */
15548           emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15549           slot = SLOT_CW_MASK_PM;
15553           gcc_unreachable ();
15560         case I387_CW_TRUNC:
15561           /* round toward zero (truncate) */
15562           emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
15563           slot = SLOT_CW_TRUNC;
15566         case I387_CW_FLOOR:
15567           /* round down toward -oo */
15568           emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
15569           slot = SLOT_CW_FLOOR;
15573           /* round up toward +oo */
15574           emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
15575           slot = SLOT_CW_CEIL;
15578         case I387_CW_MASK_PM:
15579           /* mask precision exception for nearbyint() */
15580           emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15581           slot = SLOT_CW_MASK_PM;
15585           gcc_unreachable ();
15589   gcc_assert (slot < MAX_386_STACK_LOCALS);
15591   new_mode = assign_386_stack_local (HImode, slot);
15592   emit_move_insn (new_mode, reg);
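/* Illustrative note, not from the original source: bits 10-11 of the x87
   control word select the rounding mode, so the constants above decode as
     0x0c00  RC = 11  round toward zero (trunc)
     0x0400  RC = 01  round down (floor)
     0x0800  RC = 10  round up (ceil)
   while bit 5 (0x0020) masks the precision exception for nearbyint().  */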
15595 /* Emit vzeroupper.  */
15598 ix86_avx_emit_vzeroupper (HARD_REG_SET regs_live)
15602   /* Cancel automatic vzeroupper insertion if there are
15603      live call-saved SSE registers at the insertion point.  */
15605   for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
15606     if (TEST_HARD_REG_BIT (regs_live, i) && !call_used_regs[i])
15610     for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
15611       if (TEST_HARD_REG_BIT (regs_live, i) && !call_used_regs[i])
15614   emit_insn (gen_avx_vzeroupper ());
15617 /* Generate one or more insns to set ENTITY to MODE.  */
15620 ix86_emit_mode_set (int entity, int mode, HARD_REG_SET regs_live)
15625       if (mode == AVX_U128_CLEAN)
15626         ix86_avx_emit_vzeroupper (regs_live);
15632       if (mode != I387_CW_ANY
15633           && mode != I387_CW_UNINITIALIZED)
15634         emit_i387_cw_initialization (mode);
15637       gcc_unreachable ();
15641 /* Output code for INSN to convert a float to a signed int.  OPERANDS
15642    are the insn operands.  The output may be [HSD]Imode and the input
15643    operand may be [SDX]Fmode.  */
15646 output_fix_trunc (rtx insn, rtx *operands, bool fisttp)
15648   int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15649   int dimode_p = GET_MODE (operands[0]) == DImode;
15650   int round_mode = get_attr_i387_cw (insn);
15652   /* Jump through a hoop or two for DImode, since the hardware has no
15653      non-popping instruction.  We used to do this a different way, but
15654      that was somewhat fragile and broke with post-reload splitters.  */
15655   if ((dimode_p || fisttp) && !stack_top_dies)
15656     output_asm_insn ("fld\t%y1", operands);
15658   gcc_assert (STACK_TOP_P (operands[1]));
15659   gcc_assert (MEM_P (operands[0]));
15660   gcc_assert (GET_MODE (operands[1]) != TFmode);
15663     output_asm_insn ("fisttp%Z0\t%0", operands);
15666       if (round_mode != I387_CW_ANY)
15667         output_asm_insn ("fldcw\t%3", operands);
15668       if (stack_top_dies || dimode_p)
15669         output_asm_insn ("fistp%Z0\t%0", operands);
15671         output_asm_insn ("fist%Z0\t%0", operands);
15672       if (round_mode != I387_CW_ANY)
15673         output_asm_insn ("fldcw\t%2", operands);
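/* Illustrative example, not from the original source: a DImode conversion
   without fisttp typically assembles to
     fldcw   %3      (load the truncating control word)
     fistpll %0      (store the integer and pop)
     fldcw   %2      (restore the previous control word)
   where the initial "fld\t%y1" duplication keeps the value on the stack
   when the top would not otherwise die.  */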
15679 /* Output code for x87 ffreep insn.  The OPNO argument, which may only
15680    have the values zero or one, indicates the ffreep insn's operand
15681    from the OPERANDS array.  */
15683 static const char *
15684 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
15686   if (TARGET_USE_FFREEP)
15687 #ifdef HAVE_AS_IX86_FFREEP
15688     return opno ? "ffreep\t%y1" : "ffreep\t%y0";
15691       static char retval[32];
15692       int regno = REGNO (operands[opno]);
15694       gcc_assert (STACK_REGNO_P (regno));
15696       regno -= FIRST_STACK_REG;
15698       snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
15703   return opno ? "fstp\t%y1" : "fstp\t%y0";
15707 /* Output code for INSN to compare OPERANDS.  EFLAGS_P is 1 when fcomi
15708    should be used.  UNORDERED_P is true when fucom should be used.  */
15711 output_fp_compare (rtx insn, rtx *operands, bool eflags_p, bool unordered_p)
15713   int stack_top_dies;
15714   rtx cmp_op0, cmp_op1;
15715   int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
15719       cmp_op0 = operands[0];
15720       cmp_op1 = operands[1];
15724       cmp_op0 = operands[1];
15725       cmp_op1 = operands[2];
15730       if (GET_MODE (operands[0]) == SFmode)
15732           return "%vucomiss\t{%1, %0|%0, %1}";
15734           return "%vcomiss\t{%1, %0|%0, %1}";
15737           return "%vucomisd\t{%1, %0|%0, %1}";
15739           return "%vcomisd\t{%1, %0|%0, %1}";
15742   gcc_assert (STACK_TOP_P (cmp_op0));
15744   stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15746   if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
15748       if (stack_top_dies)
15750           output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
15751           return output_387_ffreep (operands, 1);
15754         return "ftst\n\tfnstsw\t%0";
15757   if (STACK_REG_P (cmp_op1)
15759       && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
15760       && REGNO (cmp_op1) != FIRST_STACK_REG)
15762       /* If both the top of the 387 stack dies, and the other operand
15763          is also a stack register that dies, then this must be a
15764          `fcompp' float compare.  */
15768           /* There is no double popping fcomi variant.  Fortunately,
15769              eflags is immune from the fstp's cc clobbering.  */
15771             output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
15773             output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
15774           return output_387_ffreep (operands, 0);
15779             return "fucompp\n\tfnstsw\t%0";
15781             return "fcompp\n\tfnstsw\t%0";
15786       /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies.  */
15788       static const char * const alt[16] =
15790         "fcom%Z2\t%y2\n\tfnstsw\t%0",
15791         "fcomp%Z2\t%y2\n\tfnstsw\t%0",
15792         "fucom%Z2\t%y2\n\tfnstsw\t%0",
15793         "fucomp%Z2\t%y2\n\tfnstsw\t%0",
15795         "ficom%Z2\t%y2\n\tfnstsw\t%0",
15796         "ficomp%Z2\t%y2\n\tfnstsw\t%0",
15800         "fcomi\t{%y1, %0|%0, %y1}",
15801         "fcomip\t{%y1, %0|%0, %y1}",
15802         "fucomi\t{%y1, %0|%0, %y1}",
15803         "fucomip\t{%y1, %0|%0, %y1}",
15814       mask  = eflags_p << 3;
15815       mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
15816       mask |= unordered_p << 1;
15817       mask |= stack_top_dies;
15819       gcc_assert (mask < 16);
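/* Illustrative example, not from the original source: the table index is
   eflags_p << 3 | int_operand << 2 | unordered_p << 1 | stack_top_dies, so
   a floating-point compare using eflags, unordered semantics and a dying
   stack top gives mask 11 and selects "fucomip\t{%y1, %0|%0, %y1}".  */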
15828 ix86_output_addr_vec_elt (FILE *file, int value)
15830   const char *directive = ASM_LONG;
15834     directive = ASM_QUAD;
15836   gcc_assert (!TARGET_64BIT);
15839   fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
15843 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
15845   const char *directive = ASM_LONG;
15848   if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
15849     directive = ASM_QUAD;
15851   gcc_assert (!TARGET_64BIT);
15853   /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand.  */
15854   if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15855     fprintf (file, "%s%s%d-%s%d\n",
15856              directive, LPREFIX, value, LPREFIX, rel);
15857   else if (HAVE_AS_GOTOFF_IN_DATA)
15858     fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
15860   else if (TARGET_MACHO)
15862       fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
15863       machopic_output_function_base_name (file);
15868     asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
15869                  GOT_SYMBOL_NAME, LPREFIX, value);
15872 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
15876 ix86_expand_clear (rtx dest)
15880   /* We play register width games, which are only valid after reload.  */
15881   gcc_assert (reload_completed);
15883   /* Avoid HImode and its attendant prefix byte.  */
15884   if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
15885     dest = gen_rtx_REG (SImode, REGNO (dest));
15886   tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15888   /* This predicate should match that for movsi_xor and movdi_xor_rex64.  */
15889   if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15891       rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15892       tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
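/* Illustrative example, not from the original source: clearing %eax normally
   emits "xorl %eax, %eax" wrapped in a PARALLEL with a flags-register
   clobber; the plain, flags-preserving "movl $0, %eax" is kept only when
   TARGET_USE_MOV0 is set and the insn is not optimized for speed.  */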
15898 /* X is an unchanging MEM.  If it is a constant pool reference, return
15899    the constant pool rtx, else NULL.  */
15902 maybe_get_pool_constant (rtx x)
15904   x = ix86_delegitimize_address (XEXP (x, 0));
15906   if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15907     return get_pool_constant (x);
15913 ix86_expand_move (enum machine_mode mode, rtx operands[])
15916   enum tls_model model;
15921   if (GET_CODE (op1) == SYMBOL_REF)
15923       model = SYMBOL_REF_TLS_MODEL (op1);
15926           op1 = legitimize_tls_address (op1, model, true);
15927           op1 = force_operand (op1, op0);
15930           if (GET_MODE (op1) != mode)
15931             op1 = convert_to_mode (mode, op1, 1);
15933       else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15934                && SYMBOL_REF_DLLIMPORT_P (op1))
15935         op1 = legitimize_dllimport_symbol (op1, false);
15937   else if (GET_CODE (op1) == CONST
15938            && GET_CODE (XEXP (op1, 0)) == PLUS
15939            && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15941       rtx addend = XEXP (XEXP (op1, 0), 1);
15942       rtx symbol = XEXP (XEXP (op1, 0), 0);
15945       model = SYMBOL_REF_TLS_MODEL (symbol);
15947         tmp = legitimize_tls_address (symbol, model, true);
15948       else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15949                && SYMBOL_REF_DLLIMPORT_P (symbol))
15950         tmp = legitimize_dllimport_symbol (symbol, true);
15954           tmp = force_operand (tmp, NULL);
15955           tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
15956                                      op0, 1, OPTAB_DIRECT);
15959           if (GET_MODE (tmp) != mode)
15960             op1 = convert_to_mode (mode, tmp, 1);
15964   if ((flag_pic || MACHOPIC_INDIRECT)
15965       && symbolic_operand (op1, mode))
15967       if (TARGET_MACHO && !TARGET_64BIT)
15970           /* dynamic-no-pic */
15971           if (MACHOPIC_INDIRECT)
15973               rtx temp = ((reload_in_progress
15974                            || ((op0 && REG_P (op0))
15976                           ? op0 : gen_reg_rtx (Pmode));
15977               op1 = machopic_indirect_data_reference (op1, temp);
15979                 op1 = machopic_legitimize_pic_address (op1, mode,
15980                                                        temp == op1 ? 0 : temp);
15982           if (op0 != op1 && GET_CODE (op0) != MEM)
15984               rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15988           if (GET_CODE (op0) == MEM)
15989             op1 = force_reg (Pmode, op1);
15993               if (GET_CODE (temp) != REG)
15994                 temp = gen_reg_rtx (Pmode);
15995               temp = legitimize_pic_address (op1, temp);
16000           /* dynamic-no-pic */
16006             op1 = force_reg (mode, op1);
16007           else if (!(TARGET_64BIT && x86_64_movabs_operand (op1, DImode)))
16009               rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
16010               op1 = legitimize_pic_address (op1, reg);
16013               if (GET_MODE (op1) != mode)
16014                 op1 = convert_to_mode (mode, op1, 1);
16021           && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
16022               || !push_operand (op0, mode))
16024         op1 = force_reg (mode, op1);
16026       if (push_operand (op0, mode)
16027           && ! general_no_elim_operand (op1, mode))
16028         op1 = copy_to_mode_reg (mode, op1);
16030       /* Force large constants in 64bit compilation into register
16031          to get them CSEed.  */
16032       if (can_create_pseudo_p ()
16033           && (mode == DImode) && TARGET_64BIT
16034           && immediate_operand (op1, mode)
16035           && !x86_64_zext_immediate_operand (op1, VOIDmode)
16036           && !register_operand (op0, mode)
16038         op1 = copy_to_mode_reg (mode, op1);
16040       if (can_create_pseudo_p ()
16041           && FLOAT_MODE_P (mode)
16042           && GET_CODE (op1) == CONST_DOUBLE)
16044           /* If we are loading a floating point constant to a register,
16045              force the value to memory now, since we'll get better code
16046              out the back end.  */
16048           op1 = validize_mem (force_const_mem (mode, op1));
16049           if (!register_operand (op0, mode))
16051               rtx temp = gen_reg_rtx (mode);
16052               emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
16053               emit_move_insn (op0, temp);
16059   emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
16063 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
16065   rtx op0 = operands[0], op1 = operands[1];
16066   unsigned int align = GET_MODE_ALIGNMENT (mode);
16068   /* Force constants other than zero into memory.  We do not know how
16069      the instructions used to build constants modify the upper 64 bits
16070      of the register, once we have that information we may be able
16071      to handle some of them more efficiently.  */
16072   if (can_create_pseudo_p ()
16073       && register_operand (op0, mode)
16074       && (CONSTANT_P (op1)
16075           || (GET_CODE (op1) == SUBREG
16076               && CONSTANT_P (SUBREG_REG (op1))))
16077       && !standard_sse_constant_p (op1))
16078     op1 = validize_mem (force_const_mem (mode, op1));
16080   /* We need to check memory alignment for SSE mode since attribute
16081      can make operands unaligned.  */
16082   if (can_create_pseudo_p ()
16083       && SSE_REG_MODE_P (mode)
16084       && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
16085           || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
16089       /* ix86_expand_vector_move_misalign() does not like constants ...  */
16090       if (CONSTANT_P (op1)
16091           || (GET_CODE (op1) == SUBREG
16092               && CONSTANT_P (SUBREG_REG (op1))))
16093         op1 = validize_mem (force_const_mem (mode, op1));
16095       /* ... nor both arguments in memory.  */
16096       if (!register_operand (op0, mode)
16097           && !register_operand (op1, mode))
16098         op1 = force_reg (mode, op1);
16100       tmp[0] = op0; tmp[1] = op1;
16101       ix86_expand_vector_move_misalign (mode, tmp);
16105   /* Make operand1 a register if it isn't already.  */
16106   if (can_create_pseudo_p ()
16107       && !register_operand (op0, mode)
16108       && !register_operand (op1, mode))
16110       emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
16114   emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
16117 /* Split 32-byte AVX unaligned load and store if needed.  */
16120 ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
16123   rtx (*extract) (rtx, rtx, rtx);
16124   rtx (*load_unaligned) (rtx, rtx);
16125   rtx (*store_unaligned) (rtx, rtx);
16126   enum machine_mode mode;
16128   switch (GET_MODE (op0))
16131       gcc_unreachable ();
16133       extract = gen_avx_vextractf128v32qi;
16134       load_unaligned = gen_avx_loaddqu256;
16135       store_unaligned = gen_avx_storedqu256;
16139       extract = gen_avx_vextractf128v8sf;
16140       load_unaligned = gen_avx_loadups256;
16141       store_unaligned = gen_avx_storeups256;
16145       extract = gen_avx_vextractf128v4df;
16146       load_unaligned = gen_avx_loadupd256;
16147       store_unaligned = gen_avx_storeupd256;
16154       if (TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
16156           rtx r = gen_reg_rtx (mode);
16157           m = adjust_address (op1, mode, 0);
16158           emit_move_insn (r, m);
16159           m = adjust_address (op1, mode, 16);
16160           r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
16161           emit_move_insn (op0, r);
16164         emit_insn (load_unaligned (op0, op1));
16166   else if (MEM_P (op0))
16168       if (TARGET_AVX256_SPLIT_UNALIGNED_STORE)
16170           m = adjust_address (op0, mode, 0);
16171           emit_insn (extract (m, op1, const0_rtx));
16172           m = adjust_address (op0, mode, 16);
16173           emit_insn (extract (m, op1, const1_rtx));
16176         emit_insn (store_unaligned (op0, op1));
16179     gcc_unreachable ();
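/* Illustrative example, not from the original source: with
   TARGET_AVX256_SPLIT_UNALIGNED_STORE, a misaligned 32-byte V8SF store is
   emitted as two 16-byte vextractf128 halves (selectors const0_rtx and
   const1_rtx above); the unsplit path is a single unaligned vmovups, and
   loads mirror this with a 16-byte load plus a VEC_CONCAT.  */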
16182 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
16183 straight to ix86_expand_vector_move. */
16184 /* Code generation for scalar reg-reg moves of single and double precision data:
16185 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
16189 if (x86_sse_partial_reg_dependency == true)
16194 Code generation for scalar loads of double precision data:
16195 if (x86_sse_split_regs == true)
16196 movlpd mem, reg (gas syntax)
16200 Code generation for unaligned packed loads of single precision data
16201 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
16202 if (x86_sse_unaligned_move_optimal)
16205 if (x86_sse_partial_reg_dependency == true)
16217 Code generation for unaligned packed loads of double precision data
16218 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
16219 if (x86_sse_unaligned_move_optimal)
16222 if (x86_sse_split_regs == true)
16235 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
16243       && GET_MODE_SIZE (mode) == 32)
16245       switch (GET_MODE_CLASS (mode))
16247         case MODE_VECTOR_INT:
16249           op0 = gen_lowpart (V32QImode, op0);
16250           op1 = gen_lowpart (V32QImode, op1);
16253         case MODE_VECTOR_FLOAT:
16254           ix86_avx256_split_vector_move_misalign (op0, op1);
16258           gcc_unreachable ();
16266       /* ??? If we have typed data, then it would appear that using
16267          movdqu is the only way to get unaligned data loaded with
16269       if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16271           op0 = gen_lowpart (V16QImode, op0);
16272           op1 = gen_lowpart (V16QImode, op1);
16273           /* We will eventually emit movups based on insn attributes.  */
16274           emit_insn (gen_sse2_loaddqu (op0, op1));
16276       else if (TARGET_SSE2 && mode == V2DFmode)
16281           || TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
16282           || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
16283           || optimize_function_for_size_p (cfun))
16285           /* We will eventually emit movups based on insn attributes.  */
16286           emit_insn (gen_sse2_loadupd (op0, op1));
16290           /* When SSE registers are split into halves, we can avoid
16291              writing to the top half twice.  */
16292           if (TARGET_SSE_SPLIT_REGS)
16294               emit_clobber (op0);
16299           /* ??? Not sure about the best option for the Intel chips.
16300              The following would seem to satisfy; the register is
16301              entirely cleared, breaking the dependency chain.  We
16302              then store to the upper half, with a dependency depth
16303              of one.  A rumor has it that Intel recommends two movsd
16304              followed by an unpacklpd, but this is unconfirmed.  And
16305              given that the dependency depth of the unpacklpd would
16306              still be one, I'm not sure why this would be better.  */
16307           zero = CONST0_RTX (V2DFmode);
16310           m = adjust_address (op1, DFmode, 0);
16311           emit_insn (gen_sse2_loadlpd (op0, zero, m));
16312           m = adjust_address (op1, DFmode, 8);
16313           emit_insn (gen_sse2_loadhpd (op0, op0, m));
16318           || TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
16319           || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
16320           || optimize_function_for_size_p (cfun))
16322           op0 = gen_lowpart (V4SFmode, op0);
16323           op1 = gen_lowpart (V4SFmode, op1);
16324           emit_insn (gen_sse_loadups (op0, op1));
16328           if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
16329             emit_move_insn (op0, CONST0_RTX (mode));
16331             emit_clobber (op0);
16333           if (mode != V4SFmode)
16334             op0 = gen_lowpart (V4SFmode, op0);
16336           m = adjust_address (op1, V2SFmode, 0);
16337           emit_insn (gen_sse_loadlps (op0, op0, m));
16338           m = adjust_address (op1, V2SFmode, 8);
16339           emit_insn (gen_sse_loadhps (op0, op0, m));
16342   else if (MEM_P (op0))
16344       if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16346           op0 = gen_lowpart (V16QImode, op0);
16347           op1 = gen_lowpart (V16QImode, op1);
16348           /* We will eventually emit movups based on insn attributes.  */
16349           emit_insn (gen_sse2_storedqu (op0, op1));
16351       else if (TARGET_SSE2 && mode == V2DFmode)
16354           || TARGET_SSE_UNALIGNED_STORE_OPTIMAL
16355           || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
16356           || optimize_function_for_size_p (cfun))
16357             /* We will eventually emit movups based on insn attributes.  */
16358             emit_insn (gen_sse2_storeupd (op0, op1));
16361               m = adjust_address (op0, DFmode, 0);
16362               emit_insn (gen_sse2_storelpd (m, op1));
16363               m = adjust_address (op0, DFmode, 8);
16364               emit_insn (gen_sse2_storehpd (m, op1));
16369           if (mode != V4SFmode)
16370             op1 = gen_lowpart (V4SFmode, op1);
16373               || TARGET_SSE_UNALIGNED_STORE_OPTIMAL
16374               || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
16375               || optimize_function_for_size_p (cfun))
16377               op0 = gen_lowpart (V4SFmode, op0);
16378               emit_insn (gen_sse_storeups (op0, op1));
16382               m = adjust_address (op0, V2SFmode, 0);
16383               emit_insn (gen_sse_storelps (m, op1));
16384               m = adjust_address (op0, V2SFmode, 8);
16385               emit_insn (gen_sse_storehps (m, op1));
16390     gcc_unreachable ();
16393 /* Expand a push in MODE.  This is some mode for which we do not support
16394    proper push instructions, at least from the registers that we expect
16395    the value to live in.  */
16398 ix86_expand_push (enum machine_mode mode, rtx x)
16402   tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
16403                              GEN_INT (-GET_MODE_SIZE (mode)),
16404                              stack_pointer_rtx, 1, OPTAB_DIRECT);
16405   if (tmp != stack_pointer_rtx)
16406     emit_move_insn (stack_pointer_rtx, tmp);
16408   tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
16410   /* When we push an operand onto stack, it has to be aligned at least
16411      at the function argument boundary.  However since we don't have
16412      the argument type, we can't determine the actual argument
16414   emit_move_insn (tmp, x);
16417 /* Helper function of ix86_fixup_binary_operands to canonicalize
16418    operand order.  Returns true if the operands should be swapped.  */
16421 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
16424   rtx dst = operands[0];
16425   rtx src1 = operands[1];
16426   rtx src2 = operands[2];
16428   /* If the operation is not commutative, we can't do anything.  */
16429   if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
16432   /* Highest priority is that src1 should match dst.  */
16433   if (rtx_equal_p (dst, src1))
16435   if (rtx_equal_p (dst, src2))
16438   /* Next highest priority is that immediate constants come second.  */
16439   if (immediate_operand (src2, mode))
16441   if (immediate_operand (src1, mode))
16444   /* Lowest priority is that memory references should come second.  */
16454 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok.  Return the
16455    destination to use for the operation.  If different from the true
16456    destination in operands[0], a copy operation will be required.  */
16459 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
16462   rtx dst = operands[0];
16463   rtx src1 = operands[1];
16464   rtx src2 = operands[2];
16466   /* Canonicalize operand order.  */
16467   if (ix86_swap_binary_operands_p (code, mode, operands))
16471       /* It is invalid to swap operands of different modes.  */
16472       gcc_assert (GET_MODE (src1) == GET_MODE (src2));
16479   /* Both source operands cannot be in memory.  */
16480   if (MEM_P (src1) && MEM_P (src2))
16482       /* Optimization: Only read from memory once.  */
16483       if (rtx_equal_p (src1, src2))
16485           src2 = force_reg (mode, src2);
16489         src2 = force_reg (mode, src2);
16492   /* If the destination is memory, and we do not have matching source
16493      operands, do things in registers.  */
16494   if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16495     dst = gen_reg_rtx (mode);
16497   /* Source 1 cannot be a constant.  */
16498   if (CONSTANT_P (src1))
16499     src1 = force_reg (mode, src1);
16501   /* Source 1 cannot be a non-matching memory.  */
16502   if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16503     src1 = force_reg (mode, src1);
16505   /* Improve address combine.  */
16507       && GET_MODE_CLASS (mode) == MODE_INT
16509     src2 = force_reg (mode, src2);
16511   operands[1] = src1;
16512   operands[2] = src2;
16516 /* Similarly, but assume that the destination has already been
16517    set up properly.  */
16520 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
16521                                     enum machine_mode mode, rtx operands[])
16523   rtx dst = ix86_fixup_binary_operands (code, mode, operands);
16524   gcc_assert (dst == operands[0]);
16527 /* Attempt to expand a binary operator.  Make the expansion closer to the
16528    actual machine than just general_operand, which will allow 3 separate
16529    memory references (one output, two input) in a single insn.  */
16532 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
16535   rtx src1, src2, dst, op, clob;
16537   dst = ix86_fixup_binary_operands (code, mode, operands);
16538   src1 = operands[1];
16539   src2 = operands[2];
16541   /* Emit the instruction.  */
16543   op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
16544   if (reload_in_progress)
16546       /* Reload doesn't know about the flags register, and doesn't know that
16547          it doesn't want to clobber it.  We can only do this with PLUS.  */
16548       gcc_assert (code == PLUS);
16551   else if (reload_completed
16553            && !rtx_equal_p (dst, src1))
16555       /* This is going to be an LEA; avoid splitting it later.  */
16560       clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16561       emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16564   /* Fix up the destination if needed.  */
16565   if (dst != operands[0])
16566     emit_move_insn (operands[0], dst);
16569 /* Expand vector logical operation CODE (AND, IOR, XOR) in MODE with
16570    the given OPERANDS.  */
16573 ix86_expand_vector_logical_operator (enum rtx_code code, enum machine_mode mode,
16576   rtx op1 = NULL_RTX, op2 = NULL_RTX;
16577   if (GET_CODE (operands[1]) == SUBREG)
16582   else if (GET_CODE (operands[2]) == SUBREG)
16587   /* Optimize (__m128i) d | (__m128i) e and similar code
16588      when d and e are float vectors into float vector logical
16589      insn.  In C/C++ without using intrinsics there is no other way
16590      to express vector logical operation on float vectors than
16591      to cast them temporarily to integer vectors.  */
16593       && !TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
16594       && ((GET_CODE (op2) == SUBREG || GET_CODE (op2) == CONST_VECTOR))
16595       && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op1))) == MODE_VECTOR_FLOAT
16596       && GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))) == GET_MODE_SIZE (mode)
16597       && SUBREG_BYTE (op1) == 0
16598       && (GET_CODE (op2) == CONST_VECTOR
16599           || (GET_MODE (SUBREG_REG (op1)) == GET_MODE (SUBREG_REG (op2))
16600               && SUBREG_BYTE (op2) == 0))
16601       && can_create_pseudo_p ())
16604       switch (GET_MODE (SUBREG_REG (op1)))
16610           dst = gen_reg_rtx (GET_MODE (SUBREG_REG (op1)));
16611           if (GET_CODE (op2) == CONST_VECTOR)
16613               op2 = gen_lowpart (GET_MODE (dst), op2);
16614               op2 = force_reg (GET_MODE (dst), op2);
16619               op2 = SUBREG_REG (operands[2]);
16620               if (!nonimmediate_operand (op2, GET_MODE (dst)))
16621                 op2 = force_reg (GET_MODE (dst), op2);
16623           op1 = SUBREG_REG (op1);
16624           if (!nonimmediate_operand (op1, GET_MODE (dst)))
16625             op1 = force_reg (GET_MODE (dst), op1);
16626           emit_insn (gen_rtx_SET (VOIDmode, dst,
16627                                   gen_rtx_fmt_ee (code, GET_MODE (dst),
16629           emit_move_insn (operands[0], gen_lowpart (mode, dst));
16635   if (!nonimmediate_operand (operands[1], mode))
16636     operands[1] = force_reg (mode, operands[1]);
16637   if (!nonimmediate_operand (operands[2], mode))
16638     operands[2] = force_reg (mode, operands[2]);
16639   ix86_fixup_binary_operands_no_copy (code, mode, operands);
16640   emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16641                           gen_rtx_fmt_ee (code, mode, operands[1],
16645 /* Return TRUE or FALSE depending on whether the binary operator meets the
16646    appropriate constraints.  */
16649 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
16652   rtx dst = operands[0];
16653   rtx src1 = operands[1];
16654   rtx src2 = operands[2];
16656   /* Both source operands cannot be in memory.  */
16657   if (MEM_P (src1) && MEM_P (src2))
16660   /* Canonicalize operand order for commutative operators.  */
16661   if (ix86_swap_binary_operands_p (code, mode, operands))
16668   /* If the destination is memory, we must have a matching source operand.  */
16669   if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16672   /* Source 1 cannot be a constant.  */
16673   if (CONSTANT_P (src1))
16676   /* Source 1 cannot be a non-matching memory.  */
16677   if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16678     /* Support "andhi/andsi/anddi" as a zero-extending move.  */
16679     return (code == AND
16682             || (TARGET_64BIT && mode == DImode))
16683             && satisfies_constraint_L (src2));
16688 /* Attempt to expand a unary operator.  Make the expansion closer to the
16689    actual machine than just general_operand, which will allow 2 separate
16690    memory references (one output, one input) in a single insn.  */
16693 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
16696   int matching_memory;
16697   rtx src, dst, op, clob;
16702   /* If the destination is memory, and we do not have matching source
16703      operands, do things in registers.  */
16704   matching_memory = 0;
16707       if (rtx_equal_p (dst, src))
16708         matching_memory = 1;
16710         dst = gen_reg_rtx (mode);
16713   /* When source operand is memory, destination must match.  */
16714   if (MEM_P (src) && !matching_memory)
16715     src = force_reg (mode, src);
16717   /* Emit the instruction.  */
16719   op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
16720   if (reload_in_progress || code == NOT)
16722       /* Reload doesn't know about the flags register, and doesn't know that
16723          it doesn't want to clobber it.  */
16724       gcc_assert (code == NOT);
16729       clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16730       emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16733   /* Fix up the destination if needed.  */
16734   if (dst != operands[0])
16735     emit_move_insn (operands[0], dst);
16738 /* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
16739    divisor are within the range [0-255].  */
16742 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
16745   rtx end_label, qimode_label;
16746   rtx insn, div, mod;
16747   rtx scratch, tmp0, tmp1, tmp2;
16748   rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
16749   rtx (*gen_zero_extend) (rtx, rtx);
16750   rtx (*gen_test_ccno_1) (rtx, rtx);
16755       gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
16756       gen_test_ccno_1 = gen_testsi_ccno_1;
16757       gen_zero_extend = gen_zero_extendqisi2;
16760       gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
16761       gen_test_ccno_1 = gen_testdi_ccno_1;
16762       gen_zero_extend = gen_zero_extendqidi2;
16765       gcc_unreachable ();
16768   end_label = gen_label_rtx ();
16769   qimode_label = gen_label_rtx ();
16771   scratch = gen_reg_rtx (mode);
16773   /* Use 8bit unsigned divmod if dividend and divisor are within
16774      the range [0-255].  */
16775   emit_move_insn (scratch, operands[2]);
16776   scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
16777                                  scratch, 1, OPTAB_DIRECT);
16778   emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
16779   tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
16780   tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
16781   tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
16782                                gen_rtx_LABEL_REF (VOIDmode, qimode_label),
16784   insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
16785   predict_jump (REG_BR_PROB_BASE * 50 / 100);
16786   JUMP_LABEL (insn) = qimode_label;
16788 /* Generate original signed/unsigned divimod. */
16789 div
= gen_divmod4_1 (operands
[0], operands
[1],
16790 operands
[2], operands
[3]);
16793 /* Branch to the end. */
16794 emit_jump_insn (gen_jump (end_label
));
16797 /* Generate 8bit unsigned divide. */
16798 emit_label (qimode_label
);
16799 /* Don't use operands[0] for result of 8bit divide since not all
16800 registers support QImode ZERO_EXTRACT. */
16801 tmp0
= simplify_gen_subreg (HImode
, scratch
, mode
, 0);
16802 tmp1
= simplify_gen_subreg (HImode
, operands
[2], mode
, 0);
16803 tmp2
= simplify_gen_subreg (QImode
, operands
[3], mode
, 0);
16804 emit_insn (gen_udivmodhiqi3 (tmp0
, tmp1
, tmp2
));
16808 div
= gen_rtx_DIV (SImode
, operands
[2], operands
[3]);
16809 mod
= gen_rtx_MOD (SImode
, operands
[2], operands
[3]);
16813 div
= gen_rtx_UDIV (SImode
, operands
[2], operands
[3]);
16814 mod
= gen_rtx_UMOD (SImode
, operands
[2], operands
[3]);
16817 /* Extract remainder from AH. */
16818 tmp1
= gen_rtx_ZERO_EXTRACT (mode
, tmp0
, GEN_INT (8), GEN_INT (8));
16819 if (REG_P (operands
[1]))
16820 insn
= emit_move_insn (operands
[1], tmp1
);
16823 /* Need a new scratch register since the old one has result
16825 scratch
= gen_reg_rtx (mode
);
16826 emit_move_insn (scratch
, tmp1
);
16827 insn
= emit_move_insn (operands
[1], scratch
);
16829 set_unique_reg_note (insn
, REG_EQUAL
, mod
);
16831 /* Zero extend quotient from AL. */
16832 tmp1
= gen_lowpart (QImode
, tmp0
);
16833 insn
= emit_insn (gen_zero_extend (operands
[0], tmp1
));
16834 set_unique_reg_note (insn
, REG_EQUAL
, div
);
16836 emit_label (end_label
);
#define LEA_MAX_STALL (3)
#define LEA_SEARCH_THRESHOLD (LEA_MAX_STALL << 1)

/* Increase given DISTANCE in half-cycles according to
   dependencies between PREV and NEXT instructions.
   Add 1 half-cycle if there is no dependency and
   go to next cycle if there is some dependency.  */

static unsigned int
increase_distance (rtx prev, rtx next, unsigned int distance)
{
  df_ref *use_rec;
  df_ref *def_rec;

  if (!prev || !next)
    return distance + (distance & 1) + 2;

  if (!DF_INSN_USES (next) || !DF_INSN_DEFS (prev))
    return distance + 1;

  for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
    for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
      if (!DF_REF_IS_ARTIFICIAL (*def_rec)
          && DF_REF_REGNO (*use_rec) == DF_REF_REGNO (*def_rec))
        return distance + (distance & 1) + 2;

  return distance + 1;
}

/* Function checks if instruction INSN defines register number
   REGNO1 or REGNO2.  */

static bool
insn_defines_reg (unsigned int regno1, unsigned int regno2,
                  rtx insn)
{
  df_ref *def_rec;

  for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
    if (DF_REF_REG_DEF_P (*def_rec)
        && !DF_REF_IS_ARTIFICIAL (*def_rec)
        && (regno1 == DF_REF_REGNO (*def_rec)
            || regno2 == DF_REF_REGNO (*def_rec)))
      return true;

  return false;
}

/* Function checks if instruction INSN uses register number
   REGNO as a part of address expression.  */

static bool
insn_uses_reg_mem (unsigned int regno, rtx insn)
{
  df_ref *use_rec;

  for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
    if (DF_REF_REG_MEM_P (*use_rec) && regno == DF_REF_REGNO (*use_rec))
      return true;

  return false;
}

/* Search backward for non-agu definition of register number REGNO1
   or register number REGNO2 in basic block starting from instruction
   START up to head of basic block or instruction INSN.

   Function puts true value into *FOUND var if definition was found
   and false otherwise.

   Distance in half-cycles between START and found instruction or head
   of BB is added to DISTANCE and returned.  */

static int
distance_non_agu_define_in_bb (unsigned int regno1, unsigned int regno2,
                               rtx insn, int distance,
                               rtx start, bool *found)
{
  basic_block bb = start ? BLOCK_FOR_INSN (start) : NULL;
  rtx prev = start;
  rtx next = NULL;

  *found = false;

  while (prev
         && prev != insn
         && distance < LEA_SEARCH_THRESHOLD)
    {
      if (NONDEBUG_INSN_P (prev) && NONJUMP_INSN_P (prev))
        {
          distance = increase_distance (prev, next, distance);
          if (insn_defines_reg (regno1, regno2, prev))
            {
              if (recog_memoized (prev) < 0
                  || get_attr_type (prev) != TYPE_LEA)
                {
                  *found = true;
                  return distance;
                }
            }

          next = prev;
        }
      if (prev == BB_HEAD (bb))
        break;

      prev = PREV_INSN (prev);
    }

  return distance;
}

/* Search backward for non-agu definition of register number REGNO1
   or register number REGNO2 in INSN's basic block until
   1. Pass LEA_SEARCH_THRESHOLD instructions, or
   2. Reach neighbour BBs boundary, or
   3. Reach agu definition.
   Returns the distance between the non-agu definition point and INSN.
   If no definition point, returns -1.  */

static int
distance_non_agu_define (unsigned int regno1, unsigned int regno2,
                         rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  int distance = 0;
  bool found = false;

  if (insn != BB_HEAD (bb))
    distance = distance_non_agu_define_in_bb (regno1, regno2, insn,
                                              distance, PREV_INSN (insn),
                                              &found);

  if (!found && distance < LEA_SEARCH_THRESHOLD)
    {
      edge e;
      edge_iterator ei;
      bool simple_loop = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
        if (e->src == bb)
          {
            simple_loop = true;
            break;
          }

      if (simple_loop)
        distance = distance_non_agu_define_in_bb (regno1, regno2,
                                                  insn, distance,
                                                  BB_END (bb), &found);
      else
        {
          int shortest_dist = -1;
          bool found_in_bb = false;

          FOR_EACH_EDGE (e, ei, bb->preds)
            {
              int bb_dist
                = distance_non_agu_define_in_bb (regno1, regno2,
                                                 insn, distance,
                                                 BB_END (e->src),
                                                 &found_in_bb);
              if (found_in_bb)
                {
                  if (shortest_dist < 0)
                    shortest_dist = bb_dist;
                  else if (bb_dist > 0)
                    shortest_dist = MIN (bb_dist, shortest_dist);

                  found = true;
                }
            }

          distance = shortest_dist;
        }
    }

  /* get_attr_type may modify recog data.  We want to make sure
     that recog data is valid for instruction INSN, on which
     distance_non_agu_define is called.  INSN is unchanged here.  */
  extract_insn_cached (insn);

  if (!found)
    return -1;

  return distance >> 1;
}

/* Return the distance in half-cycles between INSN and the next
   insn that uses register number REGNO in memory address added
   to DISTANCE.  Return -1 if REGNO0 is set.

   Put true value into *FOUND if register usage was found and
   false otherwise.
   Put true value into *REDEFINED if register redefinition was
   found and false otherwise.  */

static int
distance_agu_use_in_bb (unsigned int regno,
                        rtx insn, int distance, rtx start,
                        bool *found, bool *redefined)
{
  basic_block bb = start ? BLOCK_FOR_INSN (start) : NULL;
  rtx next = start;
  rtx prev = NULL;

  *found = false;
  *redefined = false;

  while (next
         && next != insn
         && distance < LEA_SEARCH_THRESHOLD)
    {
      if (NONDEBUG_INSN_P (next) && NONJUMP_INSN_P (next))
        {
          distance = increase_distance(prev, next, distance);
          if (insn_uses_reg_mem (regno, next))
            {
              /* Return DISTANCE if OP0 is used in memory
                 address in NEXT.  */
              *found = true;
              return distance;
            }

          if (insn_defines_reg (regno, INVALID_REGNUM, next))
            {
              /* Return -1 if OP0 is set in NEXT.  */
              *redefined = true;
              return -1;
            }

          prev = next;
        }

      if (next == BB_END (bb))
        break;

      next = NEXT_INSN (next);
    }

  return distance;
}

/* Return the distance between INSN and the next insn that uses
   register number REGNO0 in memory address.  Return -1 if no such
   use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set.  */

static int
distance_agu_use (unsigned int regno0, rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  int distance = 0;
  bool found = false;
  bool redefined = false;

  if (insn != BB_END (bb))
    distance = distance_agu_use_in_bb (regno0, insn, distance,
                                       NEXT_INSN (insn),
                                       &found, &redefined);

  if (!found && !redefined && distance < LEA_SEARCH_THRESHOLD)
    {
      edge e;
      edge_iterator ei;
      bool simple_loop = false;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest == bb)
          {
            simple_loop = true;
            break;
          }

      if (simple_loop)
        distance = distance_agu_use_in_bb (regno0, insn,
                                           distance, BB_HEAD (bb),
                                           &found, &redefined);
      else
        {
          int shortest_dist = -1;
          bool found_in_bb = false;
          bool redefined_in_bb = false;

          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              int bb_dist
                = distance_agu_use_in_bb (regno0, insn,
                                          distance, BB_HEAD (e->dest),
                                          &found_in_bb, &redefined_in_bb);
              if (found_in_bb)
                {
                  if (shortest_dist < 0)
                    shortest_dist = bb_dist;
                  else if (bb_dist > 0)
                    shortest_dist = MIN (bb_dist, shortest_dist);

                  found = true;
                }
            }

          distance = shortest_dist;
        }
    }

  if (!found || redefined)
    return -1;

  return distance >> 1;
}

/* Define this macro to tune LEA priority vs ADD; it takes effect when
   there is a dilemma of choosing LEA or ADD.
   Negative value: ADD is more preferred than LEA
   Zero: Neutral
   Positive value: LEA is more preferred than ADD.  */
#define IX86_LEA_PRIORITY 0

/* Return true if usage of lea INSN has performance advantage
   over a sequence of instructions.  The instruction sequence has
   SPLIT_COST cycles higher latency than lea latency.  */

static bool
ix86_lea_outperforms (rtx insn, unsigned int regno0, unsigned int regno1,
                      unsigned int regno2, int split_cost)
{
  int dist_define, dist_use;

  dist_define = distance_non_agu_define (regno1, regno2, insn);
  dist_use = distance_agu_use (regno0, insn);

  if (dist_define < 0 || dist_define >= LEA_MAX_STALL)
    {
      /* If there is no non-AGU operand definition, no AGU
         operand usage and split cost is 0 then both lea
         and non-lea variants have same priority.  Currently
         we prefer lea for 64 bit code and non-lea on 32 bit
         code.  */
      if (dist_use < 0 && split_cost == 0)
        return TARGET_64BIT || IX86_LEA_PRIORITY;
      else
        return true;
    }

  /* With longer definitions distance lea is more preferable.
     Here we change it to take into account splitting cost and
     lea priority.  */
  dist_define += split_cost + IX86_LEA_PRIORITY;

  /* If there is no use in memory address then we just check
     that split cost exceeds AGU stall.  */
  if (dist_use < 0)
    return dist_define > LEA_MAX_STALL;

  /* If this insn has both backward non-agu dependence and forward
     agu dependence, the one with short distance takes effect.  */
  return dist_define >= dist_use;
}

/* Return true if it is legal to clobber flags by INSN and
   false otherwise.  */

static bool
ix86_ok_to_clobber_flags (rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  df_ref *use;
  bitmap live;

  while (insn)
    {
      if (NONDEBUG_INSN_P (insn))
        {
          for (use = DF_INSN_USES (insn); *use; use++)
            if (DF_REF_REG_USE_P (*use) && DF_REF_REGNO (*use) == FLAGS_REG)
              return false;

          if (insn_defines_reg (FLAGS_REG, INVALID_REGNUM, insn))
            return true;
        }

      if (insn == BB_END (bb))
        break;

      insn = NEXT_INSN (insn);
    }

  live = df_get_live_out(bb);
  return !REGNO_REG_SET_P (live, FLAGS_REG);
}

/* Return true if we need to split op0 = op1 + op2 into a sequence of
   move and add to avoid AGU stalls.  */

bool
ix86_avoid_lea_for_add (rtx insn, rtx operands[])
{
  unsigned int regno0, regno1, regno2;

  /* Check if we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  /* Check it is correct to split here.  */
  if (!ix86_ok_to_clobber_flags(insn))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = true_regnum (operands[1]);
  regno2 = true_regnum (operands[2]);

  /* We need to split only adds with non-destructive
     destination operand.  */
  if (regno0 == regno1 || regno0 == regno2)
    return false;
  else
    return !ix86_lea_outperforms (insn, regno0, regno1, regno2, 1);
}

/* Return true if we should emit lea instruction instead of mov
   instruction.  */

bool
ix86_use_lea_for_mov (rtx insn, rtx operands[])
{
  unsigned int regno0, regno1;

  /* Check if we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  /* Use lea for reg to reg moves only.  */
  if (!REG_P (operands[0]) || !REG_P (operands[1]))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = true_regnum (operands[1]);

  return ix86_lea_outperforms (insn, regno0, regno1, INVALID_REGNUM, 0);
}

/* Return true if we need to split lea into a sequence of
   instructions to avoid AGU stalls.  */

bool
ix86_avoid_lea_for_addr (rtx insn, rtx operands[])
{
  unsigned int regno0, regno1, regno2;
  int split_cost;
  struct ix86_address parts;
  int ok;

  /* Check we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  /* Check it is correct to split here.  */
  if (!ix86_ok_to_clobber_flags(insn))
    return false;

  ok = ix86_decompose_address (operands[1], &parts);
  gcc_assert (ok);

  /* There should be at least two components in the address.  */
  if ((parts.base != NULL_RTX) + (parts.index != NULL_RTX)
      + (parts.disp != NULL_RTX) + (parts.scale > 1) < 2)
    return false;

  /* We should not split into add if non legitimate pic
     operand is used as displacement.  */
  if (parts.disp && flag_pic && !LEGITIMATE_PIC_OPERAND_P (parts.disp))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = INVALID_REGNUM;
  regno2 = INVALID_REGNUM;

  if (parts.base)
    regno1 = true_regnum (parts.base);
  if (parts.index)
    regno2 = true_regnum (parts.index);

  split_cost = 0;

  /* Compute how many cycles we will add to execution time
     if we split the lea into a sequence of instructions.  */
  if (parts.base || parts.index)
    {
      /* Have to use mov instruction if non-destructive
         destination form is used.  */
      if (regno1 != regno0 && regno2 != regno0)
        split_cost += 1;

      /* Have to add index to base if both exist.  */
      if (parts.base && parts.index)
        split_cost += 1;

      /* Have to use shift and adds if scale is 2 or greater.  */
      if (parts.scale > 1)
        {
          if (regno0 != regno1)
            split_cost += 1;
          else if (regno2 == regno0)
            split_cost += 4;
          else
            split_cost += parts.scale;
        }

      /* Have to use add instruction with immediate if
         disp is non zero.  */
      if (parts.disp && parts.disp != const0_rtx)
        split_cost += 1;

      /* Subtract the price of lea.  */
      split_cost -= 1;
    }

  return !ix86_lea_outperforms (insn, regno0, regno1, regno2, split_cost);
}

/* Emit x86 binary operand CODE in mode MODE, where the first operand
   matches destination.  RTX includes clobber of FLAGS_REG.  */

static void
ix86_emit_binop (enum rtx_code code, enum machine_mode mode,
                 rtx dst, rtx src)
{
  rtx op, clob;

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, dst, src));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
}

/* Return true if regno1 def is nearest to the insn.  */

static bool
find_nearest_reg_def (rtx insn, int regno1, int regno2)
{
  rtx prev = insn;
  rtx start = BB_HEAD (BLOCK_FOR_INSN (insn));

  if (insn == start)
    return false;
  while (prev && prev != start)
    {
      if (!INSN_P (prev) || !NONDEBUG_INSN_P (prev))
        {
          prev = PREV_INSN (prev);
          continue;
        }
      if (insn_defines_reg (regno1, INVALID_REGNUM, prev))
        return true;
      else if (insn_defines_reg (regno2, INVALID_REGNUM, prev))
        return false;
      prev = PREV_INSN (prev);
    }

  /* None of the regs is defined in the bb.  */
  return false;
}

/* Split lea instructions into a sequence of instructions
   which are executed on ALU to avoid AGU stalls.
   It is assumed that it is allowed to clobber flags register
   at lea position.  */

void
ix86_split_lea_for_addr (rtx insn, rtx operands[], enum machine_mode mode)
{
  unsigned int regno0, regno1, regno2;
  struct ix86_address parts;
  rtx target, tmp;
  int ok, adds;

  ok = ix86_decompose_address (operands[1], &parts);
  gcc_assert (ok);

  target = gen_lowpart (mode, operands[0]);

  regno0 = true_regnum (target);
  regno1 = INVALID_REGNUM;
  regno2 = INVALID_REGNUM;

  if (parts.base)
    {
      parts.base = gen_lowpart (mode, parts.base);
      regno1 = true_regnum (parts.base);
    }

  if (parts.index)
    {
      parts.index = gen_lowpart (mode, parts.index);
      regno2 = true_regnum (parts.index);
    }

  if (parts.disp)
    parts.disp = gen_lowpart (mode, parts.disp);

  if (parts.scale > 1)
    {
      /* Case r1 = r1 + ...  */
      if (regno1 == regno0)
        {
          /* If we have a case r1 = r1 + C * r1 then we
             should use multiplication which is very
             expensive.  Assume cost model is wrong if we
             have such case here.  */
          gcc_assert (regno2 != regno0);

          for (adds = parts.scale; adds > 0; adds--)
            ix86_emit_binop (PLUS, mode, target, parts.index);
        }
      else
        {
          /* r1 = r2 + r3 * C case.  Need to move r3 into r1.  */
          if (regno0 != regno2)
            emit_insn (gen_rtx_SET (VOIDmode, target, parts.index));

          /* Use shift for scaling.  */
          ix86_emit_binop (ASHIFT, mode, target,
                           GEN_INT (exact_log2 (parts.scale)));

          if (parts.base)
            ix86_emit_binop (PLUS, mode, target, parts.base);

          if (parts.disp && parts.disp != const0_rtx)
            ix86_emit_binop (PLUS, mode, target, parts.disp);
        }
    }
  else if (!parts.base && !parts.index)
    {
      gcc_assert(parts.disp);
      emit_insn (gen_rtx_SET (VOIDmode, target, parts.disp));
    }
  else
    {
      if (!parts.base)
        {
          if (regno0 != regno2)
            emit_insn (gen_rtx_SET (VOIDmode, target, parts.index));
        }
      else if (!parts.index)
        {
          if (regno0 != regno1)
            emit_insn (gen_rtx_SET (VOIDmode, target, parts.base));
        }
      else
        {
          if (regno0 == regno1)
            tmp = parts.index;
          else if (regno0 == regno2)
            tmp = parts.base;
          else
            {
              rtx tmp1;

              /* Find better operand for SET instruction, depending
                 on which definition is farther from the insn.  */
              if (find_nearest_reg_def (insn, regno1, regno2))
                tmp = parts.index, tmp1 = parts.base;
              else
                tmp = parts.base, tmp1 = parts.index;

              emit_insn (gen_rtx_SET (VOIDmode, target, tmp));

              if (parts.disp && parts.disp != const0_rtx)
                ix86_emit_binop (PLUS, mode, target, parts.disp);

              ix86_emit_binop (PLUS, mode, target, tmp1);
              return;
            }

          ix86_emit_binop (PLUS, mode, target, tmp);
        }

      if (parts.disp && parts.disp != const0_rtx)
        ix86_emit_binop (PLUS, mode, target, parts.disp);
    }
}

/* Return true if it is ok to optimize an ADD operation to LEA
   operation to avoid flag register consumption.  For most processors,
   ADD is faster than LEA.  For the processors like ATOM, if the
   destination register of LEA holds an actual address which will be
   used soon, LEA is better and otherwise ADD is better.  */

bool
ix86_lea_for_add_ok (rtx insn, rtx operands[])
{
  unsigned int regno0 = true_regnum (operands[0]);
  unsigned int regno1 = true_regnum (operands[1]);
  unsigned int regno2 = true_regnum (operands[2]);

  /* If a = b + c, (a!=b && a!=c), must use lea form.  */
  if (regno0 != regno1 && regno0 != regno2)
    return true;

  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  return ix86_lea_outperforms (insn, regno0, regno1, regno2, 0);
}

/* Return true if destination reg of SET_BODY is shift count of
   USE_BODY.  */

static bool
ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
{
  rtx set_dest;
  rtx shift_rtx;
  int i;

  /* Retrieve destination of SET_BODY.  */
  switch (GET_CODE (set_body))
    {
    case SET:
      set_dest = SET_DEST (set_body);
      if (!set_dest || !REG_P (set_dest))
        return false;
      break;
    case PARALLEL:
      for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
        if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
                                          use_body))
          return true;
    default:
      return false;
    }

  /* Retrieve shift count of USE_BODY.  */
  switch (GET_CODE (use_body))
    {
    case SET:
      shift_rtx = XEXP (use_body, 1);
      break;
    case PARALLEL:
      for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
        if (ix86_dep_by_shift_count_body (set_body,
                                          XVECEXP (use_body, 0, i)))
          return true;
    default:
      return false;
    }

  if (shift_rtx
      && (GET_CODE (shift_rtx) == ASHIFT
          || GET_CODE (shift_rtx) == LSHIFTRT
          || GET_CODE (shift_rtx) == ASHIFTRT
          || GET_CODE (shift_rtx) == ROTATE
          || GET_CODE (shift_rtx) == ROTATERT))
    {
      rtx shift_count = XEXP (shift_rtx, 1);

      /* Return true if shift count is dest of SET_BODY.  */
      if (REG_P (shift_count))
        {
          /* Add check since it can be invoked before register
             allocation in pre-reload schedule.  */
          if (reload_completed
              && true_regnum (set_dest) == true_regnum (shift_count))
            return true;
          else if (REGNO(set_dest) == REGNO(shift_count))
            return true;
        }
    }

  return false;
}

/* Return true if destination reg of SET_INSN is shift count of
   USE_INSN.  */

bool
ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
{
  return ix86_dep_by_shift_count_body (PATTERN (set_insn),
                                       PATTERN (use_insn));
}

/* Return TRUE or FALSE depending on whether the unary operator meets the
   appropriate constraints.  */

bool
ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
                        enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx operands[2] ATTRIBUTE_UNUSED)
{
  /* If one of operands is memory, source and destination must match.  */
  if ((MEM_P (operands[0])
       || MEM_P (operands[1]))
      && ! rtx_equal_p (operands[0], operands[1]))
    return false;
  return true;
}

/* Return TRUE if the operands to a vec_interleave_{high,low}v2df
   are ok, keeping in mind the possible movddup alternative.  */

bool
ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
{
  if (MEM_P (operands[0]))
    return rtx_equal_p (operands[0], operands[1 + high]);
  if (MEM_P (operands[1]) && MEM_P (operands[2]))
    return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
  return true;
}

/* Post-reload splitter for converting an SF or DFmode value in an
   SSE register into an unsigned SImode.  */

void
ix86_split_convert_uns_si_sse (rtx operands[])
{
  enum machine_mode vecmode;
  rtx value, large, zero_or_two31, input, two31, x;

  large = operands[1];
  zero_or_two31 = operands[2];
  input = operands[3];
  two31 = operands[4];
  vecmode = GET_MODE (large);
  value = gen_rtx_REG (vecmode, REGNO (operands[0]));

  /* Load up the value into the low element.  We must ensure that the other
     elements are valid floats -- zero is the easiest such value.  */
  if (MEM_P (input))
    {
      if (vecmode == V4SFmode)
        emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
      else
        emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
    }
  else
    {
      input = gen_rtx_REG (vecmode, REGNO (input));
      emit_move_insn (value, CONST0_RTX (vecmode));
      if (vecmode == V4SFmode)
        emit_insn (gen_sse_movss (value, value, input));
      else
        emit_insn (gen_sse2_movsd (value, value, input));
    }

  emit_move_insn (large, two31);
  emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);

  x = gen_rtx_fmt_ee (LE, vecmode, large, value);
  emit_insn (gen_rtx_SET (VOIDmode, large, x));

  x = gen_rtx_AND (vecmode, zero_or_two31, large);
  emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));

  x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
  emit_insn (gen_rtx_SET (VOIDmode, value, x));

  large = gen_rtx_REG (V4SImode, REGNO (large));
  emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));

  x = gen_rtx_REG (V4SImode, REGNO (value));
  if (vecmode == V4SFmode)
    emit_insn (gen_fix_truncv4sfv4si2 (x, value));
  else
    emit_insn (gen_sse2_cvttpd2dq (x, value));
  value = x;

  emit_insn (gen_xorv4si3 (value, value, large));
}

/* Convert an unsigned DImode value into a DFmode, using only SSE.
   Expects the 64-bit DImode to be supplied in a pair of integral
   registers.  Requires SSE2; will use SSE3 if available.  For x86_32,
   -mfpmath=sse, !optimize_size only.  */

void
ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
  rtx int_xmm, fp_xmm;
  rtx biases, exponents;
  rtx x;

  int_xmm = gen_reg_rtx (V4SImode);
  if (TARGET_INTER_UNIT_MOVES)
    emit_insn (gen_movdi_to_sse (int_xmm, input));
  else if (TARGET_SSE_SPLIT_REGS)
    {
      emit_clobber (int_xmm);
      emit_move_insn (gen_lowpart (DImode, int_xmm), input);
    }
  else
    {
      x = gen_reg_rtx (V2DImode);
      ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
      emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
    }

  x = gen_rtx_CONST_VECTOR (V4SImode,
                            gen_rtvec (4, GEN_INT (0x43300000UL),
                                       GEN_INT (0x45300000UL),
                                       const0_rtx, const0_rtx));
  exponents = validize_mem (force_const_mem (V4SImode, x));

  /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
  emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));

  /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
     yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
     Similarly (0x45300000UL ## fp_value_hi_xmm) yields
     (0x1.0p84 + double(fp_value_hi_xmm)).
     Note these exponents differ by 32.  */

  fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));

  /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
     in [0,2**32-1] and [0]+[2**32,2**64-1] respectively.  */
  real_ldexp (&bias_lo_rvt, &dconst1, 52);
  real_ldexp (&bias_hi_rvt, &dconst1, 84);
  biases = const_double_from_real_value (bias_lo_rvt, DFmode);
  x = const_double_from_real_value (bias_hi_rvt, DFmode);
  biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
  biases = validize_mem (force_const_mem (V2DFmode, biases));
  emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));

  /* Add the upper and lower DFmode values together.  */
  if (TARGET_SSE3)
    emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
  else
    {
      x = copy_to_mode_reg (V2DFmode, fp_xmm);
      emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
      emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
    }

  ix86_expand_vector_extract (false, target, fp_xmm, 0);
}

/* Not used, but eases macroization of patterns.  */
void
ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
                                  rtx input ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Convert an unsigned SImode value into a DFmode.  Only currently used
   for SSE, but applicable anywhere.  */

void
ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE TWO31r;
  rtx x, fp;

  x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
                           NULL, 1, OPTAB_DIRECT);

  fp = gen_reg_rtx (DFmode);
  emit_insn (gen_floatsidf2 (fp, x));

  real_ldexp (&TWO31r, &dconst1, 31);
  x = const_double_from_real_value (TWO31r, DFmode);

  x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
  if (x != target)
    emit_move_insn (target, x);
}

/* Convert a signed DImode value into a DFmode.  Only used for SSE in
   32-bit mode; otherwise we have a direct convert instruction.  */

void
ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE TWO32r;
  rtx fp_lo, fp_hi, x;

  fp_lo = gen_reg_rtx (DFmode);
  fp_hi = gen_reg_rtx (DFmode);

  emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));

  real_ldexp (&TWO32r, &dconst1, 32);
  x = const_double_from_real_value (TWO32r, DFmode);
  fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);

  ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));

  x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
                           0, OPTAB_DIRECT);
  if (x != target)
    emit_move_insn (target, x);
}

/* Convert an unsigned SImode value into a SFmode, using only SSE.
   For x86_32, -mfpmath=sse, !optimize_size only.  */

void
ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE ONE16r;
  rtx fp_hi, fp_lo, int_hi, int_lo, x;

  real_ldexp (&ONE16r, &dconst1, 16);
  x = const_double_from_real_value (ONE16r, SFmode);
  int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
                                NULL, 0, OPTAB_DIRECT);
  int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
                                NULL, 0, OPTAB_DIRECT);
  fp_hi = gen_reg_rtx (SFmode);
  fp_lo = gen_reg_rtx (SFmode);
  emit_insn (gen_floatsisf2 (fp_hi, int_hi));
  emit_insn (gen_floatsisf2 (fp_lo, int_lo));
  fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
                               0, OPTAB_DIRECT);
  fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
                               0, OPTAB_DIRECT);
  if (!rtx_equal_p (target, fp_hi))
    emit_move_insn (target, fp_hi);
}

/* floatunsv{4,8}siv{4,8}sf2 expander.  Expand code to convert
   a vector of unsigned ints VAL to vector of floats TARGET.  */

void
ix86_expand_vector_convert_uns_vsivsf (rtx target, rtx val)
{
  rtx tmp[8];
  REAL_VALUE_TYPE TWO16r;
  enum machine_mode intmode = GET_MODE (val);
  enum machine_mode fltmode = GET_MODE (target);
  rtx (*cvt) (rtx, rtx);

  if (intmode == V4SImode)
    cvt = gen_floatv4siv4sf2;
  else
    cvt = gen_floatv8siv8sf2;
  tmp[0] = ix86_build_const_vector (intmode, 1, GEN_INT (0xffff));
  tmp[0] = force_reg (intmode, tmp[0]);
  tmp[1] = expand_simple_binop (intmode, AND, val, tmp[0], NULL_RTX, 1,
                                OPTAB_DIRECT);
  tmp[2] = expand_simple_binop (intmode, LSHIFTRT, val, GEN_INT (16),
                                NULL_RTX, 1, OPTAB_DIRECT);
  tmp[3] = gen_reg_rtx (fltmode);
  emit_insn (cvt (tmp[3], tmp[1]));
  tmp[4] = gen_reg_rtx (fltmode);
  emit_insn (cvt (tmp[4], tmp[2]));
  real_ldexp (&TWO16r, &dconst1, 16);
  tmp[5] = const_double_from_real_value (TWO16r, SFmode);
  tmp[5] = force_reg (fltmode, ix86_build_const_vector (fltmode, 1, tmp[5]));
  tmp[6] = expand_simple_binop (fltmode, MULT, tmp[4], tmp[5], NULL_RTX, 1,
                                OPTAB_DIRECT);
  tmp[7] = expand_simple_binop (fltmode, PLUS, tmp[3], tmp[6], target, 1,
                                OPTAB_DIRECT);
  if (tmp[7] != target)
    emit_move_insn (target, tmp[7]);
}

/* Adjust a V*SFmode/V*DFmode value VAL so that *sfix_trunc* resp. fix_trunc*
   pattern can be used on it instead of *ufix_trunc* resp. fixuns_trunc*.
   This is done by doing just signed conversion if < 0x1p31, and otherwise by
   subtracting 0x1p31 first and xoring in 0x80000000 from *XORP afterwards.  */

rtx
ix86_expand_adjust_ufix_to_sfix_si (rtx val, rtx *xorp)
{
  REAL_VALUE_TYPE TWO31r;
  rtx two31r, tmp[4];
  enum machine_mode mode = GET_MODE (val);
  enum machine_mode scalarmode = GET_MODE_INNER (mode);
  enum machine_mode intmode = GET_MODE_SIZE (mode) == 32 ? V8SImode : V4SImode;
  rtx (*cmp) (rtx, rtx, rtx, rtx);
  int i;

  for (i = 0; i < 3; i++)
    tmp[i] = gen_reg_rtx (mode);
  real_ldexp (&TWO31r, &dconst1, 31);
  two31r = const_double_from_real_value (TWO31r, scalarmode);
  two31r = ix86_build_const_vector (mode, 1, two31r);
  two31r = force_reg (mode, two31r);
  switch (mode)
    {
    case V8SFmode: cmp = gen_avx_maskcmpv8sf3; break;
    case V4SFmode: cmp = gen_sse_maskcmpv4sf3; break;
    case V4DFmode: cmp = gen_avx_maskcmpv4df3; break;
    case V2DFmode: cmp = gen_sse2_maskcmpv2df3; break;
    default: gcc_unreachable ();
    }
  tmp[3] = gen_rtx_LE (mode, two31r, val);
  emit_insn (cmp (tmp[0], two31r, val, tmp[3]));
  tmp[1] = expand_simple_binop (mode, AND, tmp[0], two31r, tmp[1],
                                0, OPTAB_DIRECT);
  if (intmode == V4SImode || TARGET_AVX2)
    *xorp = expand_simple_binop (intmode, ASHIFT,
                                 gen_lowpart (intmode, tmp[0]),
                                 GEN_INT (31), NULL_RTX, 0,
                                 OPTAB_DIRECT);
  else
    {
      rtx two31 = GEN_INT ((unsigned HOST_WIDE_INT) 1 << 31);
      two31 = ix86_build_const_vector (intmode, 1, two31);
      *xorp = expand_simple_binop (intmode, AND,
                                   gen_lowpart (intmode, tmp[0]),
                                   two31, NULL_RTX, 0,
                                   OPTAB_DIRECT);
    }
  return expand_simple_binop (mode, MINUS, val, tmp[1], tmp[2],
                              0, OPTAB_DIRECT);
}

/* A subroutine of ix86_build_signbit_mask.  If VECT is true,
   then replicate the value for all elements of the vector
   register.  */

rtx
ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
{
  int i, n_elt;
  rtvec v;
  enum machine_mode scalar_mode;

  switch (mode)
    {
    case V32QImode:
    case V16QImode:
    case V16HImode:
    case V8HImode:
    case V8SImode:
    case V4SImode:
    case V4DImode:
    case V2DImode:
      gcc_assert (vect);
    case V8SFmode:
    case V4SFmode:
    case V4DFmode:
    case V2DFmode:
      n_elt = GET_MODE_NUNITS (mode);
      v = rtvec_alloc (n_elt);
      scalar_mode = GET_MODE_INNER (mode);

      RTVEC_ELT (v, 0) = value;

      for (i = 1; i < n_elt; ++i)
        RTVEC_ELT (v, i) = vect ? value : CONST0_RTX (scalar_mode);

      return gen_rtx_CONST_VECTOR (mode, v);

    default:
      gcc_unreachable ();
    }
}

/* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
   and ix86_expand_int_vcond.  Create a mask for the sign bit in MODE
   for an SSE register.  If VECT is true, then replicate the mask for
   all elements of the vector register.  If INVERT is true, then create
   a mask excluding the sign bit.  */

rtx
ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
{
  enum machine_mode vec_mode, imode;
  HOST_WIDE_INT hi, lo;
  int shift = 63;
  rtx v;
  rtx mask;

  /* Find the sign bit, sign extended to 2*HWI.  */
  switch (mode)
    {
    case V8SImode:
    case V4SImode:
    case V8SFmode:
    case V4SFmode:
      vec_mode = mode;
      mode = GET_MODE_INNER (mode);
      imode = SImode;
      lo = 0x80000000, hi = lo < 0;
      break;

    case V4DImode:
    case V2DImode:
    case V4DFmode:
    case V2DFmode:
      vec_mode = mode;
      mode = GET_MODE_INNER (mode);
      imode = DImode;
      if (HOST_BITS_PER_WIDE_INT >= 64)
        lo = (HOST_WIDE_INT)1 << shift, hi = -1;
      else
        lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
      break;

    case TImode:
    case TFmode:
      vec_mode = VOIDmode;
      if (HOST_BITS_PER_WIDE_INT >= 64)
        {
          imode = TImode;
          lo = 0, hi = (HOST_WIDE_INT)1 << shift;
        }
      else
        {
          rtvec vec;

          imode = DImode;
          lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
          if (invert)
            {
              lo = ~lo, hi = ~hi;
              v = constm1_rtx;
            }
          else
            v = const0_rtx;

          mask = immed_double_const (lo, hi, imode);

          vec = gen_rtvec (2, v, mask);
          v = gen_rtx_CONST_VECTOR (V2DImode, vec);
          v = copy_to_mode_reg (mode, gen_lowpart (mode, v));

          return v;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (invert)
    lo = ~lo, hi = ~hi;

  /* Force this value into the low part of a fp vector constant.  */
  mask = immed_double_const (lo, hi, imode);
  mask = gen_lowpart (mode, mask);

  if (vec_mode == VOIDmode)
    return force_reg (mode, mask);

  v = ix86_build_const_vector (vec_mode, vect, mask);
  return force_reg (vec_mode, v);
}

/* Generate code for floating point ABS or NEG.  */

void
ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
                                rtx operands[])
{
  rtx mask, set, dst, src;
  bool use_sse = false;
  bool vector_mode = VECTOR_MODE_P (mode);
  enum machine_mode vmode = mode;

  if (vector_mode)
    use_sse = true;
  else if (mode == TFmode)
    use_sse = true;
  else if (TARGET_SSE_MATH)
    {
      use_sse = SSE_FLOAT_MODE_P (mode);
      if (mode == SFmode)
        vmode = V4SFmode;
      else if (mode == DFmode)
        vmode = V2DFmode;
    }

  /* NEG and ABS performed with SSE use bitwise mask operations.
     Create the appropriate mask now.  */
  if (use_sse)
    mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
  else
    mask = NULL_RTX;

  dst = operands[0];
  src = operands[1];

  set = gen_rtx_fmt_e (code, mode, src);
  set = gen_rtx_SET (VOIDmode, dst, set);

  if (mask)
    {
      rtx use, clob;
      rtvec par;

      use = gen_rtx_USE (VOIDmode, mask);
      if (vector_mode)
        par = gen_rtvec (2, set, use);
      else
        {
          clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
          par = gen_rtvec (3, set, use, clob);
        }
      emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
    }
  else
    emit_insn (set);
}

/* Expand a copysign operation.  Special case operand 0 being a constant.  */

void
ix86_expand_copysign (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, op0, op1, mask, nmask;

  dest = operands[0];
  op0 = operands[1];
  op1 = operands[2];

  mode = GET_MODE (dest);

  if (mode == SFmode)
    vmode = V4SFmode;
  else if (mode == DFmode)
    vmode = V2DFmode;
  else
    vmode = mode;

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx (*copysign_insn)(rtx, rtx, rtx, rtx);

      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);

      if (mode == SFmode || mode == DFmode)
        {
          if (op0 == CONST0_RTX (mode))
            op0 = CONST0_RTX (vmode);
          else
            {
              rtx v = ix86_build_const_vector (vmode, false, op0);

              op0 = force_reg (vmode, v);
            }
        }
      else if (op0 != CONST0_RTX (mode))
        op0 = force_reg (mode, op0);

      mask = ix86_build_signbit_mask (vmode, 0, 0);

      if (mode == SFmode)
        copysign_insn = gen_copysignsf3_const;
      else if (mode == DFmode)
        copysign_insn = gen_copysigndf3_const;
      else
        copysign_insn = gen_copysigntf3_const;

      emit_insn (copysign_insn (dest, op0, op1, mask));
    }
  else
    {
      rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);

      nmask = ix86_build_signbit_mask (vmode, 0, 1);
      mask = ix86_build_signbit_mask (vmode, 0, 0);

      if (mode == SFmode)
        copysign_insn = gen_copysignsf3_var;
      else if (mode == DFmode)
        copysign_insn = gen_copysigndf3_var;
      else
        copysign_insn = gen_copysigntf3_var;

      emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
    }
}

/* Deconstruct a copysign operation into bit masks.  Operand 0 is known to
   be a constant, and so has already been expanded into a vector constant.  */

void
ix86_split_copysign_const (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, op0, mask, x;

  dest = operands[0];
  op0 = operands[1];
  mask = operands[3];

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  dest = simplify_gen_subreg (vmode, dest, mode, 0);
  x = gen_rtx_AND (vmode, dest, mask);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  if (op0 != CONST0_RTX (vmode))
    {
      x = gen_rtx_IOR (vmode, dest, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
}

/* Deconstruct a copysign operation into bit masks.  Operand 0 is variable,
   so we have to do two masks.  */

void
ix86_split_copysign_var (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, scratch, op0, op1, mask, nmask, x;

  dest = operands[0];
  scratch = operands[1];
  op0 = operands[2];
  op1 = operands[3];
  nmask = operands[4];
  mask = operands[5];

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  if (rtx_equal_p (op0, op1))
    {
      /* Shouldn't happen often (it's useless, obviously), but when it does
         we'd generate incorrect code if we continue below.  */
      emit_move_insn (dest, op0);
      return;
    }

  if (REG_P (mask) && REGNO (dest) == REGNO (mask))  /* alternative 0 */
    {
      gcc_assert (REGNO (op1) == REGNO (scratch));

      x = gen_rtx_AND (vmode, scratch, mask);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      dest = mask;
      op0 = simplify_gen_subreg (vmode, op0, mode, 0);
      x = gen_rtx_NOT (vmode, dest);
      x = gen_rtx_AND (vmode, x, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else
    {
      if (REGNO (op1) == REGNO (scratch))            /* alternative 1,3 */
        {
          x = gen_rtx_AND (vmode, scratch, mask);
        }
      else                                           /* alternative 2,4 */
        {
          gcc_assert (REGNO (mask) == REGNO (scratch));
          op1 = simplify_gen_subreg (vmode, op1, mode, 0);
          x = gen_rtx_AND (vmode, scratch, op1);
        }
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      if (REGNO (op0) == REGNO (dest))               /* alternative 1,2 */
        {
          dest = simplify_gen_subreg (vmode, op0, mode, 0);
          x = gen_rtx_AND (vmode, dest, nmask);
        }
      else                                           /* alternative 3,4 */
        {
          gcc_assert (REGNO (nmask) == REGNO (dest));
          dest = nmask;
          op0 = simplify_gen_subreg (vmode, op0, mode, 0);
          x = gen_rtx_AND (vmode, dest, op0);
        }
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }

  x = gen_rtx_IOR (vmode, dest, scratch);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
}

/* Return TRUE or FALSE depending on whether the first SET in INSN
   has source and destination with matching CC modes, and that the
   CC mode is at least as constrained as REQ_MODE.  */

bool
ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
{
  rtx set;
  enum machine_mode set_mode;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCNOmode:
      if (req_mode != CCNOmode
          && (req_mode != CCmode
              || XEXP (SET_SRC (set), 1) != const0_rtx))
        return false;
      break;
    case CCmode:
      if (req_mode == CCGCmode)
        return false;
      /* FALLTHRU */
    case CCGCmode:
      if (req_mode == CCGOCmode || req_mode == CCNOmode)
        return false;
      /* FALLTHRU */
    case CCGOCmode:
      if (req_mode == CCZmode)
        return false;
      /* FALLTHRU */
    case CCZmode:
      break;

    case CCAmode:
    case CCCmode:
    case CCOmode:
    case CCSmode:
      if (set_mode != req_mode)
        return false;
      break;

    default:
      gcc_unreachable ();
    }

  return GET_MODE (SET_SRC (set)) == set_mode;
}

/* Generate insn patterns to do an integer compare of OPERANDS.  */

static rtx
ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode cmpmode;
  rtx tmp, flags;

  cmpmode = SELECT_CC_MODE (code, op0, op1);
  flags = gen_rtx_REG (cmpmode, FLAGS_REG);

  /* This is very simple, but making the interface the same as in the
     FP case makes the rest of the code easier.  */
  tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
}

/* Figure out whether to use ordered or unordered fp comparisons.
   Return the appropriate mode to use.  */

enum machine_mode
ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
{
  /* ??? In order to make all comparisons reversible, we do all comparisons
     non-trapping when compiling for IEEE.  Once gcc is able to distinguish
     all forms trapping and nontrapping comparisons, we can make inequality
     comparisons trapping again, since it results in better code when using
     FCOM based compares.  */
  return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
}

enum machine_mode
ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
      return ix86_fp_compare_mode (code);
    }

  switch (code)
    {
      /* Only zero flag is needed.  */
    case EQ:                    /* ZF=0 */
    case NE:                    /* ZF!=0 */
      return CCZmode;
      /* Codes needing carry flag.  */
    case GEU:                   /* CF=0 */
    case LTU:                   /* CF=1 */
      /* Detect overflow checks.  They need just the carry flag.  */
      if (GET_CODE (op0) == PLUS
          && rtx_equal_p (op1, XEXP (op0, 0)))
        return CCCmode;
      else
        return CCmode;
    case GTU:                   /* CF=0 & ZF=0 */
    case LEU:                   /* CF=1 | ZF=1 */
      /* Detect overflow checks.  They need just the carry flag.  */
      if (GET_CODE (op0) == MINUS
          && rtx_equal_p (op1, XEXP (op0, 0)))
        return CCCmode;
      else
        return CCmode;
      /* Codes possibly doable only with sign flag when
         comparing against zero.  */
    case GE:                    /* SF=OF   or   SF=0 */
    case LT:                    /* SF<>OF  or   SF=1 */
      if (op1 == const0_rtx)
        return CCGOCmode;
      else
        /* For other cases Carry flag is not required.  */
        return CCGCmode;
      /* Codes doable only with sign flag when comparing
         against zero, but we miss jump instruction for it
         so we need to use relational tests against overflow
         that thus needs to be zero.  */
    case GT:                    /* ZF=0 & SF=OF */
    case LE:                    /* ZF=1 | SF<>OF */
      if (op1 == const0_rtx)
        return CCNOmode;
      else
        return CCGCmode;
      /* The strcmp pattern does (use flags), and combine may ask us
         for a proper mode.  */
    case USE:
      return CCmode;
    default:
      gcc_unreachable ();
    }
}

/* Return the fixed registers used for condition codes.  */

static bool
ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLAGS_REG;
  *p2 = FPSR_REG;
  return true;
}

/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static enum machine_mode
ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
{
  if (m1 == m2)
    return m1;

  if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
    return VOIDmode;

  if ((m1 == CCGCmode && m2 == CCGOCmode)
      || (m1 == CCGOCmode && m2 == CCGCmode))
    return CCGCmode;

  if (m1 == CCZmode && (m2 == CCGCmode || m2 == CCGOCmode))
    return m2;
  else if (m2 == CCZmode && (m1 == CCGCmode || m1 == CCGOCmode))
    return m1;

  switch (m1)
    {
    default:
      gcc_unreachable ();

    case CCmode:
    case CCGCmode:
    case CCGOCmode:
    case CCNOmode:
    case CCAmode:
    case CCCmode:
    case CCOmode:
    case CCSmode:
    case CCZmode:
      switch (m2)
        {
        default:
          return VOIDmode;

        case CCmode:
        case CCGCmode:
        case CCGOCmode:
        case CCNOmode:
        case CCAmode:
        case CCCmode:
        case CCOmode:
        case CCSmode:
        case CCZmode:
          return CCmode;
        }

    case CCFPmode:
    case CCFPUmode:
      /* These are only compatible with themselves, which we already
         checked above.  */
      return VOIDmode;
    }
}

/* Return a comparison we can do and that it is equivalent to
   swap_condition (code) apart possibly from orderedness.
   But, never change orderedness if TARGET_IEEE_FP, returning
   UNKNOWN in that case if necessary.  */

static enum rtx_code
ix86_fp_swap_condition (enum rtx_code code)
{
  switch (code)
    {
    case GT:                    /* GTU - CF=0 & ZF=0 */
      return TARGET_IEEE_FP ? UNKNOWN : UNLT;
    case GE:                    /* GEU - CF=0 */
      return TARGET_IEEE_FP ? UNKNOWN : UNLE;
    case UNLT:                  /* LTU - CF=1 */
      return TARGET_IEEE_FP ? UNKNOWN : GT;
    case UNLE:                  /* LEU - CF=1 | ZF=1 */
      return TARGET_IEEE_FP ? UNKNOWN : GE;
    default:
      return swap_condition (code);
    }
}

/* Return cost of comparison CODE using the best strategy for performance.
   All following functions use the number of instructions as a cost metric.
   In future this should be tweaked to compute bytes for optimize_size and
   take into account performance of various instructions on various CPUs.  */

static int
ix86_fp_comparison_cost (enum rtx_code code)
{
  int arith_cost;

  /* The cost of code using bit-twiddling on %ah.  */
  switch (code)
    {
    case UNLE:
    case UNLT:
    case LTGT:
    case GT:
    case GE:
    case UNORDERED:
    case ORDERED:
    case UNEQ:
      arith_cost = 4;
      break;
    case LT:
    case NE:
    case EQ:
    case UNGE:
      arith_cost = TARGET_IEEE_FP ? 5 : 4;
      break;
    case LE:
    case UNGT:
      arith_cost = TARGET_IEEE_FP ? 6 : 4;
      break;
    default:
      gcc_unreachable ();
    }

  switch (ix86_fp_comparison_strategy (code))
    {
    case IX86_FPCMP_COMI:
      return arith_cost > 4 ? 3 : 2;
    case IX86_FPCMP_SAHF:
      return arith_cost > 4 ? 4 : 3;
    default:
      return arith_cost;
    }
}

/* Return strategy to use for floating-point.  We assume that fcomi is always
   preferable where available, since that is also true when looking at size
   (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test).  */

enum ix86_fpcmp_strategy
ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
{
  /* Do fcomi/sahf based test when profitable.  */

  if (TARGET_CMOVE)
    return IX86_FPCMP_COMI;

  if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
    return IX86_FPCMP_SAHF;

  return IX86_FPCMP_ARITH;
}

/* Swap, force into registers, or otherwise massage the two operands
   to a fp comparison.  The operands are updated in place; the new
   comparison code is returned.  */

static enum rtx_code
ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
  rtx op0 = *pop0, op1 = *pop1;
  enum machine_mode op_mode = GET_MODE (op0);
  int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);

  /* All of the unordered compare instructions only work on registers.
     The same is true of the fcomi compare instructions.  The XFmode
     compare instructions require registers except when comparing
     against zero or when converting operand 1 from fixed point to
     floating point.  */

  if (!is_sse
      && (fpcmp_mode == CCFPUmode
          || (op_mode == XFmode
              && ! (standard_80387_constant_p (op0) == 1
                    || standard_80387_constant_p (op1) == 1)
              && GET_CODE (op1) != FLOAT)
          || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
    {
      op0 = force_reg (op_mode, op0);
      op1 = force_reg (op_mode, op1);
    }
  else
    {
      /* %%% We only allow op1 in memory; op0 must be st(0).  So swap
         things around if they appear profitable, otherwise force op0
         into a register.  */

      if (standard_80387_constant_p (op0) == 0
          || (MEM_P (op0)
              && ! (standard_80387_constant_p (op1) == 0
                    || MEM_P (op1))))
        {
          enum rtx_code new_code = ix86_fp_swap_condition (code);
          if (new_code != UNKNOWN)
            {
              rtx tmp;
              tmp = op0, op0 = op1, op1 = tmp;
              code = new_code;
            }
        }

      if (!REG_P (op0))
        op0 = force_reg (op_mode, op0);

      if (CONSTANT_P (op1))
        {
          int tmp = standard_80387_constant_p (op1);
          if (tmp == 0)
            op1 = validize_mem (force_const_mem (op_mode, op1));
          else if (tmp == 1)
            {
              if (TARGET_CMOVE)
                op1 = force_reg (op_mode, op1);
            }
          else
            op1 = force_reg (op_mode, op1);
        }
    }

  /* Try to rearrange the comparison to make it cheaper.  */
  if (ix86_fp_comparison_cost (code)
      > ix86_fp_comparison_cost (swap_condition (code))
      && (REG_P (op1) || can_create_pseudo_p ()))
    {
      rtx tmp;
      tmp = op0, op0 = op1, op1 = tmp;
      code = swap_condition (code);
      if (!REG_P (op0))
        op0 = force_reg (op_mode, op0);
    }

  *pop0 = op0;
  *pop1 = op1;
  return code;
}

/* Convert comparison codes we use to represent FP comparison to integer
   code that will result in proper branch.  Return UNKNOWN if no such code
   is available.  */

enum rtx_code
ix86_fp_compare_code_to_integer (enum rtx_code code)
{
  switch (code)
    {
    case GT:
      return GTU;
    case GE:
      return GEU;
    case ORDERED:
    case UNORDERED:
      return code;
    case UNEQ:
      return EQ;
    case UNLT:
      return LTU;
    case UNLE:
      return LEU;
    case LTGT:
      return NE;
    default:
      return UNKNOWN;
    }
}

/* Generate insn patterns to do a floating point compare of OPERANDS.  */

static rtx
ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
{
  enum machine_mode fpcmp_mode, intcmp_mode;
  rtx tmp, tmp2;

  fpcmp_mode = ix86_fp_compare_mode (code);
  code = ix86_prepare_fp_compare_args (code, &op0, &op1);

  /* Do fcomi/sahf based test when profitable.  */
  switch (ix86_fp_comparison_strategy (code))
    {
    case IX86_FPCMP_COMI:
      intcmp_mode = fpcmp_mode;
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
                         tmp);
      emit_insn (tmp);
      break;

    case IX86_FPCMP_SAHF:
      intcmp_mode = fpcmp_mode;
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
                         tmp);

      if (!scratch)
        scratch = gen_reg_rtx (HImode);
      tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
      break;

    case IX86_FPCMP_ARITH:
      /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first.  */
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);

      if (!scratch)
        scratch = gen_reg_rtx (HImode);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));

      /* In the unordered case, we have to check C2 for NaN's, which
         doesn't happen to work out to anything nice combination-wise.
         So do some bit twiddling on the value we've got in AH to come
         up with an appropriate set of condition codes.  */

      intcmp_mode = CCNOmode;
      switch (code)
        {
        case GT:
        case UNGT:
          if (code == GT || !TARGET_IEEE_FP)
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
              code = EQ;
            }
          else
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
              intcmp_mode = CCmode;
              code = GEU;
            }
          break;
        case LT:
        case UNLT:
          if (code == LT && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
              intcmp_mode = CCmode;
              code = EQ;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
              code = NE;
            }
          break;
        case GE:
        case UNGE:
          if (code == GE || !TARGET_IEEE_FP)
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
              code = EQ;
            }
          else
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
              code = NE;
            }
          break;
        case LE:
        case UNLE:
          if (code == LE && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
              intcmp_mode = CCmode;
              code = LTU;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
              code = NE;
            }
          break;
        case EQ:
        case UNEQ:
          if (code == EQ && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
              intcmp_mode = CCmode;
              code = EQ;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
              code = NE;
            }
          break;
        case NE:
        case LTGT:
          if (code == NE && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
                                             GEN_INT (0x40)));
              code = NE;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
              code = EQ;
            }
          break;

        case UNORDERED:
          emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
          code = NE;
          break;
        case ORDERED:
          emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
          code = EQ;
          break;

        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode,
                         gen_rtx_REG (intcmp_mode, FLAGS_REG),
                         const0_rtx);
}

rtx
ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx ret;

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
    {
      gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
      ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
    }
  else
    ret = ix86_expand_int_compare (code, op0, op1);

  return ret;
}

void
ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx tmp;

  switch (mode)
    {
    case SFmode:
    case DFmode:
    case XFmode:
    case QImode:
    case HImode:
    case SImode:
      simple:
      tmp = ix86_expand_compare (code, op0, op1);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, label),
                                  pc_rtx);
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      return;

    case DImode:
      if (TARGET_64BIT)
        goto simple;
    case TImode:
      /* Expand DImode branch into multiple compare+branch.  */
      {
        rtx lo[2], hi[2], label2;
        enum rtx_code code1, code2, code3;
        enum machine_mode submode;

        if (CONSTANT_P (op0) && !CONSTANT_P (op1))
          {
            tmp = op0, op0 = op1, op1 = tmp;
            code = swap_condition (code);
          }

        split_double_mode (mode, &op0, 1, lo+0, hi+0);
        split_double_mode (mode, &op1, 1, lo+1, hi+1);

        submode = mode == DImode ? SImode : DImode;

        /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
           avoid two branches.  This costs one extra insn, so disable when
           optimizing for size.  */

        if ((code == EQ || code == NE)
            && (!optimize_insn_for_size_p ()
                || hi[1] == const0_rtx || lo[1] == const0_rtx))
          {
            rtx xor0, xor1;

            xor1 = hi[0];
            if (hi[1] != const0_rtx)
              xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            xor0 = lo[0];
            if (lo[1] != const0_rtx)
              xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            tmp = expand_binop (submode, ior_optab, xor1, xor0,
                                NULL_RTX, 0, OPTAB_WIDEN);

            ix86_expand_branch (code, tmp, const0_rtx, label);
            return;
          }

        /* Otherwise, if we are doing less-than or greater-or-equal-than,
           op1 is a constant and the low word is zero, then we can just
           examine the high word.  Similarly for low word -1 and
           less-or-equal-than or greater-than.  */

        if (CONST_INT_P (hi[1]))
          switch (code)
            {
            case LT: case LTU: case GE: case GEU:
              if (lo[1] == const0_rtx)
                {
                  ix86_expand_branch (code, hi[0], hi[1], label);
                  return;
                }
              break;
            case LE: case LEU: case GT: case GTU:
              if (lo[1] == constm1_rtx)
                {
                  ix86_expand_branch (code, hi[0], hi[1], label);
                  return;
                }
              break;
            default:
              break;
            }

        /* Otherwise, we need two or three jumps.  */

        label2 = gen_label_rtx ();

        code1 = code;
        code2 = swap_condition (code);
        code3 = unsigned_condition (code);

        switch (code)
          {
          case LT: case GT: case LTU: case GTU:
            break;

          case LE:   code1 = LT;  code2 = GT;  break;
          case GE:   code1 = GT;  code2 = LT;  break;
          case LEU:  code1 = LTU; code2 = GTU; break;
          case GEU:  code1 = GTU; code2 = LTU; break;

          case EQ:   code1 = UNKNOWN; code2 = NE;  break;
          case NE:   code2 = UNKNOWN; break;

          default:
            gcc_unreachable ();
          }

        /*
         * a < b =>
         *    if (hi(a) < hi(b)) goto true;
         *    if (hi(a) > hi(b)) goto false;
         *    if (lo(a) < lo(b)) goto true;
         *  false:
         */

        if (code1 != UNKNOWN)
          ix86_expand_branch (code1, hi[0], hi[1], label);
        if (code2 != UNKNOWN)
          ix86_expand_branch (code2, hi[0], hi[1], label2);

        ix86_expand_branch (code3, lo[0], lo[1], label);

        if (code2 != UNKNOWN)
          emit_label (label2);
        return;
      }

    default:
      gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
      goto simple;
    }
}

/* Split branch based on floating point condition.  */
void
ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
                      rtx target1, rtx target2, rtx tmp, rtx pushed)
{
  rtx condition;
  rtx i;

  if (target2 != pc_rtx)
    {
      rtx tmp = target2;
      code = reverse_condition_maybe_unordered (code);
      target2 = target1;
      target1 = tmp;
    }

  condition = ix86_expand_fp_compare (code, op1, op2,
                                      tmp);

  /* Remove pushed operand from stack.  */
  if (pushed)
    ix86_free_from_memory (GET_MODE (pushed));

  i = emit_jump_insn (gen_rtx_SET
                      (VOIDmode, pc_rtx,
                       gen_rtx_IF_THEN_ELSE (VOIDmode,
                                             condition, target1, target2)));
  if (split_branch_probability >= 0)
    add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
}

void
ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  rtx ret;

  gcc_assert (GET_MODE (dest) == QImode);

  ret = ix86_expand_compare (code, op0, op1);
  PUT_MODE (ret, QImode);
  emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
}

/* Expand comparison setting or clearing carry flag.  Return true when
   successful and set pop for the operation.  */
static bool
ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
{
  enum machine_mode mode =
    GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);

  /* Do not handle double-mode compares that go through special path.  */
  if (mode == (TARGET_64BIT ? TImode : DImode))
    return false;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      rtx compare_op, compare_seq;

      gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));

      /* Shortcut:  following common codes never translate
	 into carry flag compares.  */
      if (code == EQ || code == NE || code == UNEQ || code == LTGT
	  || code == ORDERED || code == UNORDERED)
	return false;

      /* These comparisons require zero flag; swap operands so they won't.  */
      if ((code == GT || code == UNLE || code == LE || code == UNGT)
	  && !TARGET_IEEE_FP)
	{
	  rtx tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	  code = swap_condition (code);
	}

      /* Try to expand the comparison and verify that we end up with
	 carry flag based comparison.  This fails to be true only when
	 we decide to expand comparison using arithmetic that is not
	 too common scenario.  */
      start_sequence ();
      compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
      compare_seq = get_insns ();
      end_sequence ();

      if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
	  || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
	code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
      else
	code = GET_CODE (compare_op);

      if (code != LTU && code != GEU)
	return false;

      emit_insn (compare_seq);
      *pop = compare_op;
      return true;
    }

  if (!INTEGRAL_MODE_P (mode))
    return false;

  switch (code)
    {
    case LTU:
    case GEU:
      break;

    /* Convert a==0 into (unsigned)a<1.  */
    case EQ:
    case NE:
      if (op1 != const0_rtx)
	return false;
      op1 = const1_rtx;
      code = (code == EQ ? LTU : GEU);
      break;

    /* Convert a>b into b<a or a>=b-1.  */
    case GTU:
    case LEU:
      if (CONST_INT_P (op1))
	{
	  op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
	  /* Bail out on overflow.  We still can swap operands but that
	     would force loading of the constant into register.  */
	  if (op1 == const0_rtx
	      || !x86_64_immediate_operand (op1, GET_MODE (op1)))
	    return false;
	  code = (code == GTU ? GEU : LTU);
	}
      else
	{
	  rtx tmp = op1;
	  op1 = op0;
	  op0 = tmp;
	  code = (code == GTU ? LTU : GEU);
	}
      break;

    /* Convert a>=0 into (unsigned)a<0x80000000.  */
    case LT:
    case GE:
      if (mode == DImode || op1 != const0_rtx)
	return false;
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LT ? GEU : LTU);
      break;
    case LE:
    case GT:
      if (mode == DImode || op1 != constm1_rtx)
	return false;
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LE ? GEU : LTU);
      break;

    default:
      return false;
    }
  /* Swapping operands may cause constant to appear as first operand.  */
  if (!nonimmediate_operand (op0, VOIDmode))
    {
      if (!can_create_pseudo_p ())
	return false;
      op0 = force_reg (mode, op0);
    }
  *pop = ix86_expand_compare (code, op0, op1);
  gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
  return true;
}
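/* Worked example of the conversions above (illustrative only):
   "a == 0" is rewritten as "(unsigned) a < 1", which expands to

	cmpl	$1, a		; sets carry iff a == 0

   so the result is exactly the carry flag (LTU), directly consumable by
   sbb/adc with no setcc.  Likewise "a >= 0" in SImode is rewritten as
   "(unsigned) a < 0x80000000".  */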
bool
ix86_expand_int_movcc (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[1]), compare_code;
  rtx compare_seq, compare_op;
  enum machine_mode mode = GET_MODE (operands[0]);
  bool sign_bit_compare_p = false;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (GET_MODE (op0) == TImode
      || (GET_MODE (op0) == DImode
	  && !TARGET_64BIT))
    return false;

  start_sequence ();
  compare_op = ix86_expand_compare (code, op0, op1);
  compare_seq = get_insns ();
  end_sequence ();

  compare_code = GET_CODE (compare_op);

  if ((op1 == const0_rtx && (code == GE || code == LT))
      || (op1 == constm1_rtx && (code == GT || code == LE)))
    sign_bit_compare_p = true;

  /* Don't attempt mode expansion here -- if we had to expand 5 or 6
     HImode insns, we'd be swallowed in word prefix ops.  */

  if ((mode != HImode || TARGET_FAST_PREFIX)
      && (mode != (TARGET_64BIT ? TImode : DImode))
      && CONST_INT_P (operands[2])
      && CONST_INT_P (operands[3]))
    {
      rtx out = operands[0];
      HOST_WIDE_INT ct = INTVAL (operands[2]);
      HOST_WIDE_INT cf = INTVAL (operands[3]);
      HOST_WIDE_INT diff;

      diff = ct - cf;
      /* Sign bit compares are better done using shifts than we do by using
	 sbb.  */
      if (sign_bit_compare_p
	  || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
	{
	  /* Detect overlap between destination and compare sources.  */
	  rtx tmp = out;

	  if (!sign_bit_compare_p)
	    {
	      rtx flags;
	      bool fpcmp = false;

	      compare_code = GET_CODE (compare_op);

	      flags = XEXP (compare_op, 0);

	      if (GET_MODE (flags) == CCFPmode
		  || GET_MODE (flags) == CCFPUmode)
		{
		  fpcmp = true;
		  compare_code
		    = ix86_fp_compare_code_to_integer (compare_code);
		}

	      /* To simplify rest of code, restrict to the GEU case.  */
	      if (compare_code == LTU)
		{
		  HOST_WIDE_INT tmp = ct;
		  ct = cf;
		  cf = tmp;
		  compare_code = reverse_condition (compare_code);
		  code = reverse_condition (code);
		}
	      else
		{
		  if (fpcmp)
		    PUT_CODE (compare_op,
			      reverse_condition_maybe_unordered
			        (GET_CODE (compare_op)));
		  else
		    PUT_CODE (compare_op,
			      reverse_condition (GET_CODE (compare_op)));
		}
	      diff = ct - cf;

	      if (reg_overlap_mentioned_p (out, op0)
		  || reg_overlap_mentioned_p (out, op1))
		tmp = gen_reg_rtx (mode);

	      if (mode == DImode)
		emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
	      else
		emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
						 flags, compare_op));
	    }
	  else
	    {
	      if (code == GT || code == GE)
		code = reverse_condition (code);
	      else
		{
		  HOST_WIDE_INT tmp = ct;
		  ct = cf;
		  cf = tmp;
		  diff = ct - cf;
		}
	      tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
	    }

	  if (diff == 1)
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * [addl dest, ct]
	       *
	       * Size 5 - 8.
	       */
	      if (ct)
		tmp = expand_simple_binop (mode, PLUS,
					   tmp, GEN_INT (ct),
					   copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }
	  else if (cf == -1)
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * orl $ct, dest
	       *
	       * Size 8.
	       */
	      tmp = expand_simple_binop (mode, IOR,
					 tmp, GEN_INT (ct),
					 copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }
	  else if (diff == -1 && ct)
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * notl dest
	       * [addl dest, cf]
	       *
	       * Size 8 - 11.
	       */
	      tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
	      if (cf)
		tmp = expand_simple_binop (mode, PLUS,
					   copy_rtx (tmp), GEN_INT (cf),
					   copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }
	  else
	    {
	      /*
	       * cmpl op0,op1
	       * sbbl dest,dest
	       * [notl dest]
	       * andl cf - ct, dest
	       * [addl dest, ct]
	       *
	       * Size 8 - 11.
	       */

	      if (cf == 0)
		{
		  cf = ct;
		  ct = 0;
		  tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
		}

	      tmp = expand_simple_binop (mode, AND,
					 copy_rtx (tmp),
					 gen_int_mode (cf - ct, mode),
					 copy_rtx (tmp), 1, OPTAB_DIRECT);
	      if (ct)
		tmp = expand_simple_binop (mode, PLUS,
					   copy_rtx (tmp), GEN_INT (ct),
					   copy_rtx (tmp), 1, OPTAB_DIRECT);
	    }

	  if (!rtx_equal_p (tmp, out))
	    emit_move_insn (copy_rtx (out), copy_rtx (tmp));

	  return true;
	}
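      /* Concrete instance of the sbb idiom above (illustrative only):
	 "dest = (a < b) ? 10 : 3" with unsigned operands can be emitted as

		cmpl	b, a
		sbbl	dest, dest	; dest = -1 if carry (a < b), else 0
		andl	$7, dest	; 7 is the difference of the constants
		addl	$3, dest	; dest = 10 if a < b, else 3

	 with no branch and no cmov.  */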
      if (diff < 0)
	{
	  enum machine_mode cmp_mode = GET_MODE (op0);
	  HOST_WIDE_INT tmp;

	  tmp = ct, ct = cf, cf = tmp;
	  diff = -diff;

	  if (SCALAR_FLOAT_MODE_P (cmp_mode))
	    {
	      gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));

	      /* We may be reversing unordered compare to normal compare, that
		 is not valid in general (we may convert non-trapping condition
		 to trapping one), however on i386 we currently emit all
		 comparisons unordered.  */
	      compare_code = reverse_condition_maybe_unordered (compare_code);
	      code = reverse_condition_maybe_unordered (code);
	    }
	  else
	    {
	      compare_code = reverse_condition (compare_code);
	      code = reverse_condition (code);
	    }
	}

      compare_code = UNKNOWN;
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
	  && CONST_INT_P (op1))
	{
	  if (op1 == const0_rtx
	      && (code == LT || code == GE))
	    compare_code = code;
	  else if (op1 == constm1_rtx)
	    {
	      if (code == LE)
		compare_code = LT;
	      else if (code == GT)
		compare_code = GE;
	    }
	}

      /* Optimize dest = (op0 < 0) ? -1 : cf.  */
      if (compare_code != UNKNOWN
	  && GET_MODE (op0) == GET_MODE (out)
	  && (cf == -1 || ct == -1))
	{
	  /* If lea code below could be used, only optimize
	     if it results in a 2 insn sequence.  */

	  if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
		 || diff == 3 || diff == 5 || diff == 9)
	      || (compare_code == LT && ct == -1)
	      || (compare_code == GE && cf == -1))
	    {
	      /*
	       * notl op1	(if necessary)
	       * sarl $31, op1
	       * orl cf, op1
	       */
	      if (ct != -1)
		{
		  cf = ct;
		  ct = -1;
		  code = reverse_condition (code);
		}

	      out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);

	      out = expand_simple_binop (mode, IOR,
					 out, GEN_INT (cf),
					 out, 1, OPTAB_DIRECT);
	      if (out != operands[0])
		emit_move_insn (operands[0], out);

	      return true;
	    }
	}

      if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
	   || diff == 3 || diff == 5 || diff == 9)
	  && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
	  && (mode != DImode
	      || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
	{
	  /*
	   * xorl dest,dest
	   * cmpl op1,op2
	   * setcc dest
	   * lea cf(dest*(ct-cf)),dest
	   *
	   * Size 14.
	   *
	   * This also catches the degenerate setcc-only case.
	   */

	  rtx tmp;
	  int nops;

	  out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);

	  nops = 0;
	  /* On x86_64 the lea instruction operates on Pmode, so we need
	     to get arithmetics done in proper mode to match.  */
	  if (diff == 1)
	    tmp = copy_rtx (out);
	  else
	    {
	      rtx out1;
	      out1 = copy_rtx (out);
	      tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
	      nops++;
	      if (diff & 1)
		{
		  tmp = gen_rtx_PLUS (mode, tmp, out1);
		  nops++;
		}
	    }
	  if (cf != 0)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
	      nops++;
	    }
	  if (!rtx_equal_p (tmp, out))
	    {
	      if (nops == 1)
		out = force_operand (tmp, copy_rtx (out));
	      else
		emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out),
					copy_rtx (tmp)));
	    }
	  if (!rtx_equal_p (out, operands[0]))
	    emit_move_insn (operands[0], copy_rtx (out));

	  return true;
	}
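      /* Illustrative instance of the setcc+lea pattern above (not part of
	 the original sources): for "dest = (a < b) ? 7 : 3" we have
	 diff == 4, so

		xorl	dest, dest
		cmpl	b, a
		setl	dest		  ; dest = 0 or 1
		leal	3(,dest,4), dest  ; dest = 3 + dest*4 = 3 or 7

	 where one lea covers both the multiply by diff and the addition
	 of the false-arm constant.  */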
      /*
       * General case:			Jumpful:
       *   xorl dest,dest		cmpl op1, op2
       *   cmpl op1, op2		movl ct, dest
       *   setcc dest			jcc 1f
       *   decl dest			movl cf, dest
       *   andl (cf-ct),dest		1:
       *   addl ct,dest
       *
       * Size 20.			Size 14.
       *
       * This is reasonably steep, but branch mispredict costs are
       * high on modern cpus, so consider failing only if optimizing
       * for space.
       */

      if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
	  && BRANCH_COST (optimize_insn_for_speed_p (),
			  false) >= 2)
	{
	  if (cf == 0)
	    {
	      enum machine_mode cmp_mode = GET_MODE (op0);

	      cf = ct;
	      ct = 0;

	      if (SCALAR_FLOAT_MODE_P (cmp_mode))
		{
		  gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));

		  /* We may be reversing unordered compare to normal compare,
		     that is not valid in general (we may convert non-trapping
		     condition to trapping one), however on i386 we currently
		     emit all comparisons unordered.  */
		  code = reverse_condition_maybe_unordered (code);
		}
	      else
		{
		  code = reverse_condition (code);
		  if (compare_code != UNKNOWN)
		    compare_code = reverse_condition (compare_code);
		}
	    }

	  if (compare_code != UNKNOWN)
	    {
	      /* notl op1	(if needed)
		 sarl $31, op1
		 andl (cf-ct), op1
		 addl ct, op1

		 For x < 0 (resp. x <= -1) there will be no notl,
		 so if possible swap the constants to get rid of the
		 complement.
		 True/false will be -1/0 while code below (store flag
		 followed by decrement) is 0/-1, so the constants need
		 to be exchanged once more.  */

	      if (compare_code == GE || !cf)
		{
		  code = reverse_condition (code);
		  compare_code = LT;
		}
	      else
		{
		  HOST_WIDE_INT tmp = cf;
		  cf = ct;
		  ct = tmp;
		}

	      out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
	    }
	  else
	    {
	      out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);

	      out = expand_simple_binop (mode, PLUS, copy_rtx (out),
					 constm1_rtx,
					 copy_rtx (out), 1, OPTAB_DIRECT);
	    }

	  out = expand_simple_binop (mode, AND, copy_rtx (out),
				     gen_int_mode (cf - ct, mode),
				     copy_rtx (out), 1, OPTAB_DIRECT);
	  if (ct)
	    out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
				       copy_rtx (out), 1, OPTAB_DIRECT);
	  if (!rtx_equal_p (out, operands[0]))
	    emit_move_insn (operands[0], copy_rtx (out));

	  return true;
	}
    }

  if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
    {
      /* Try a few things more with specific constants and a variable.  */

      optab op;
      rtx var, orig_out, out, tmp;

      if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
	return false;

      /* If one of the two operands is an interesting constant, load a
	 constant with the above and mask it in with a logical operation.  */

      if (CONST_INT_P (operands[2]))
	{
	  var = operands[3];
	  if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
	    operands[3] = constm1_rtx, op = and_optab;
	  else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
	    operands[3] = const0_rtx, op = ior_optab;
	  else
	    return false;
	}
      else if (CONST_INT_P (operands[3]))
	{
	  var = operands[2];
	  if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
	    operands[2] = constm1_rtx, op = and_optab;
	  else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
	    operands[2] = const0_rtx, op = ior_optab;
	  else
	    return false;
	}
      else
	return false;

      orig_out = operands[0];
      tmp = gen_reg_rtx (mode);
      operands[0] = tmp;

      /* Recurse to get the constant loaded.  */
      if (ix86_expand_int_movcc (operands) == 0)
	return false;

      /* Mask in the interesting variable.  */
      out = expand_binop (mode, op, var, tmp, orig_out, 0,
			  OPTAB_WIDEN);
      if (!rtx_equal_p (out, orig_out))
	emit_move_insn (copy_rtx (orig_out), copy_rtx (out));

      return true;
    }

  /*
   * For comparison with above,
   *
   * movl cf,dest
   * movl ct,tmp
   * cmpl op1,op2
   * cmovcc tmp,dest
   *
   * Size 15.
   */

  if (! nonimmediate_operand (operands[2], mode))
    operands[2] = force_reg (mode, operands[2]);
  if (! nonimmediate_operand (operands[3], mode))
    operands[3] = force_reg (mode, operands[3]);

  if (! register_operand (operands[2], VOIDmode)
      && (mode == QImode
	  || ! register_operand (operands[3], VOIDmode)))
    operands[2] = force_reg (mode, operands[2]);

  if (mode == QImode
      && ! register_operand (operands[3], VOIDmode))
    operands[3] = force_reg (mode, operands[3]);

  emit_insn (compare_seq);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
			  gen_rtx_IF_THEN_ELSE (mode,
						compare_op, operands[2],
						operands[3])));
  return true;
}
/* Swap, force into registers, or otherwise massage the two operands
   to an sse comparison with a mask result.  Thus we differ a bit from
   ix86_prepare_fp_compare_args which expects to produce a flags result.

   The DEST operand exists to help determine whether to commute commutative
   operators.  The POP0/POP1 operands are updated in place.  The new
   comparison code is returned, or UNKNOWN if not implementable.  */

static enum rtx_code
ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
				  rtx *pop0, rtx *pop1)
{
  rtx tmp;

  switch (code)
    {
    case LTGT:
    case UNEQ:
      /* AVX supports all the needed comparisons.  */
      if (TARGET_AVX)
	break;
      /* We have no LTGT as an operator.  We could implement it with
	 NE & ORDERED, but this requires an extra temporary.  It's
	 not clear that it's worth it.  */
      return UNKNOWN;

    case LT:
    case LE:
    case UNGT:
    case UNGE:
      /* These are supported directly.  */
      break;

    case EQ:
    case NE:
    case UNORDERED:
    case ORDERED:
      /* AVX has 3 operand comparisons, no need to swap anything.  */
      if (TARGET_AVX)
	break;
      /* For commutative operators, try to canonicalize the destination
	 operand to be first in the comparison - this helps reload to
	 avoid extra moves.  */
      if (!dest || !rtx_equal_p (dest, *pop1))
	break;
      /* FALLTHRU */

    case GE:
    case GT:
    case UNLE:
    case UNLT:
      /* These are not supported directly before AVX, and furthermore
	 ix86_expand_sse_fp_minmax only optimizes LT/UNGE.  Swap the
	 comparison operands to transform into something that is
	 supported.  */
      tmp = *pop0;
      *pop0 = *pop1;
      *pop1 = tmp;
      code = swap_condition (code);
      break;

    default:
      gcc_unreachable ();
    }

  return code;
}
/* Detect conditional moves that exactly match min/max operational
   semantics.  Note that this is IEEE safe, as long as we don't
   interchange the operands.

   Returns FALSE if this conditional move doesn't match a MIN/MAX,
   and TRUE if the operation is successful and instructions are emitted.  */

static bool
ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
			   rtx cmp_op1, rtx if_true, rtx if_false)
{
  enum machine_mode mode;
  bool is_min;
  rtx tmp;

  if (code == LT)
    ;
  else if (code == UNGE)
    {
      tmp = if_true;
      if_true = if_false;
      if_false = tmp;
    }
  else
    return false;

  if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
    is_min = true;
  else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
    is_min = false;
  else
    return false;

  mode = GET_MODE (dest);

  /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
     but MODE may be a vector mode and thus not appropriate.  */
  if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
    {
      int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
      rtvec v;

      if_true = force_reg (mode, if_true);
      v = gen_rtvec (2, if_true, if_false);
      tmp = gen_rtx_UNSPEC (mode, v, u);
    }
  else
    {
      code = is_min ? SMIN : SMAX;
      tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
  return true;
}
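/* Why operand order matters here (illustrative note, not from the original
   sources): the SSE min/max instructions are not symmetric -- when the
   operands are unordered (a NaN is present) or compare equal as +0.0/-0.0,
   the result comes from a fixed operand position.  So "x < y ? x : y" maps
   onto a min only with x and y kept in this exact order; interchanging
   them would silently change the NaN and signed-zero behaviour, which is
   what the IEEE-safe UNSPEC path above is protecting.  */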
/* Expand an sse vector comparison.  Return the register with the result.  */

static rtx
ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
		     rtx op_true, rtx op_false)
{
  enum machine_mode mode = GET_MODE (dest);
  enum machine_mode cmp_mode = GET_MODE (cmp_op0);
  rtx x;

  cmp_op0 = force_reg (cmp_mode, cmp_op0);
  if (!nonimmediate_operand (cmp_op1, cmp_mode))
    cmp_op1 = force_reg (cmp_mode, cmp_op1);

  if (optimize
      || reg_overlap_mentioned_p (dest, op_true)
      || reg_overlap_mentioned_p (dest, op_false))
    dest = gen_reg_rtx (mode);

  x = gen_rtx_fmt_ee (code, cmp_mode, cmp_op0, cmp_op1);
  if (cmp_mode != mode)
    {
      x = force_reg (cmp_mode, x);
      convert_move (dest, x, false);
    }
  else
    emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  return dest;
}
/* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
   operations.  This is used for both scalar and vector conditional moves.  */

static void
ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
{
  enum machine_mode mode = GET_MODE (dest);
  rtx x;

  if (vector_all_ones_operand (op_true, mode)
      && rtx_equal_p (op_false, CONST0_RTX (mode)))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, cmp));
    }
  else if (op_false == CONST0_RTX (mode))
    {
      op_true = force_reg (mode, op_true);
      x = gen_rtx_AND (mode, cmp, op_true);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else if (op_true == CONST0_RTX (mode))
    {
      op_false = force_reg (mode, op_false);
      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, op_false);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else if (INTEGRAL_MODE_P (mode) && op_true == CONSTM1_RTX (mode))
    {
      op_false = force_reg (mode, op_false);
      x = gen_rtx_IOR (mode, cmp, op_false);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else if (TARGET_XOP)
    {
      op_true = force_reg (mode, op_true);

      if (!nonimmediate_operand (op_false, mode))
	op_false = force_reg (mode, op_false);

      emit_insn (gen_rtx_SET (mode, dest,
			      gen_rtx_IF_THEN_ELSE (mode, cmp,
						    op_true,
						    op_false)));
    }
  else
    {
      rtx (*gen) (rtx, rtx, rtx, rtx) = NULL;

      if (!nonimmediate_operand (op_true, mode))
	op_true = force_reg (mode, op_true);

      op_false = force_reg (mode, op_false);

      switch (mode)
	{
	case V4SFmode:
	  if (TARGET_SSE4_1)
	    gen = gen_sse4_1_blendvps;
	  break;
	case V2DFmode:
	  if (TARGET_SSE4_1)
	    gen = gen_sse4_1_blendvpd;
	  break;
	case V16QImode:
	case V8HImode:
	case V4SImode:
	case V2DImode:
	  if (TARGET_SSE4_1)
	    {
	      gen = gen_sse4_1_pblendvb;
	      dest = gen_lowpart (V16QImode, dest);
	      op_false = gen_lowpart (V16QImode, op_false);
	      op_true = gen_lowpart (V16QImode, op_true);
	      cmp = gen_lowpart (V16QImode, cmp);
	    }
	  break;
	case V8SFmode:
	  if (TARGET_AVX)
	    gen = gen_avx_blendvps256;
	  break;
	case V4DFmode:
	  if (TARGET_AVX)
	    gen = gen_avx_blendvpd256;
	  break;
	case V32QImode:
	case V16HImode:
	case V8SImode:
	case V4DImode:
	  if (TARGET_AVX2)
	    {
	      gen = gen_avx2_pblendvb;
	      dest = gen_lowpart (V32QImode, dest);
	      op_false = gen_lowpart (V32QImode, op_false);
	      op_true = gen_lowpart (V32QImode, op_true);
	      cmp = gen_lowpart (V32QImode, cmp);
	    }
	  break;
	default:
	  break;
	}

      if (gen != NULL)
	emit_insn (gen (dest, op_false, op_true, cmp));
      else
	{
	  rtx t2, t3;

	  op_true = force_reg (mode, op_true);

	  t2 = gen_reg_rtx (mode);
	  if (optimize)
	    t3 = gen_reg_rtx (mode);
	  else
	    t3 = dest;

	  x = gen_rtx_AND (mode, op_true, cmp);
	  emit_insn (gen_rtx_SET (VOIDmode, t2, x));

	  x = gen_rtx_NOT (mode, cmp);
	  x = gen_rtx_AND (mode, x, op_false);
	  emit_insn (gen_rtx_SET (VOIDmode, t3, x));

	  x = gen_rtx_IOR (mode, t3, t2);
	  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
	}
    }
}
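/* The masking identity used in the fallback above (illustrative note):
   with CMP being all-ones in each lane where the condition holds and
   all-zeros elsewhere,

	dest = (op_true & cmp) | (op_false & ~cmp)

   selects op_true lanes where cmp is set and op_false lanes where it is
   clear.  The special cases earlier in the function simply drop whichever
   of the three logical operations degenerates to a no-op.  */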
/* Expand a floating-point conditional move.  Return true if successful.  */

bool
ix86_expand_fp_movcc (rtx operands[])
{
  enum machine_mode mode = GET_MODE (operands[0]);
  enum rtx_code code = GET_CODE (operands[1]);
  rtx tmp, compare_op;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
    {
      enum machine_mode cmode;

      /* Since we've no cmove for sse registers, don't force bad register
	 allocation just to gain access to it.  Deny movcc when the
	 comparison mode doesn't match the move mode.  */
      cmode = GET_MODE (op0);
      if (cmode == VOIDmode)
	cmode = GET_MODE (op1);
      if (cmode != mode)
	return false;

      code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
      if (code == UNKNOWN)
	return false;

      if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
				     operands[2], operands[3]))
	return true;

      tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
				 operands[2], operands[3]);
      ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
      return true;
    }

  if (GET_MODE (op0) == TImode
      || (GET_MODE (op0) == DImode
	  && !TARGET_64BIT))
    return false;

  /* The floating point conditional move instructions don't directly
     support conditions resulting from a signed integer comparison.  */

  compare_op = ix86_expand_compare (code, op0, op1);
  if (!fcmov_comparison_operator (compare_op, VOIDmode))
    {
      tmp = gen_reg_rtx (QImode);
      ix86_expand_setcc (tmp, code, op0, op1);

      compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
    }

  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
			  gen_rtx_IF_THEN_ELSE (mode, compare_op,
						operands[2], operands[3])));

  return true;
}
/* Expand a floating-point vector conditional move; a vcond operation
   rather than a movcc operation.  */

bool
ix86_expand_fp_vcond (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[3]);
  rtx cmp;

  code = ix86_prepare_sse_fp_compare_args (operands[0], code,
					   &operands[4], &operands[5]);
  if (code == UNKNOWN)
    {
      rtx temp;
      switch (GET_CODE (operands[3]))
	{
	case LTGT:
	  temp = ix86_expand_sse_cmp (operands[0], ORDERED, operands[4],
				      operands[5], operands[0], operands[0]);
	  cmp = ix86_expand_sse_cmp (operands[0], NE, operands[4],
				     operands[5], operands[1], operands[2]);
	  code = AND;
	  break;
	case UNEQ:
	  temp = ix86_expand_sse_cmp (operands[0], UNORDERED, operands[4],
				      operands[5], operands[0], operands[0]);
	  cmp = ix86_expand_sse_cmp (operands[0], EQ, operands[4],
				     operands[5], operands[1], operands[2]);
	  code = IOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      cmp = expand_simple_binop (GET_MODE (cmp), code, temp, cmp, cmp, 1,
				 OPTAB_DIRECT);
      ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
      return true;
    }

  if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
				 operands[5], operands[1], operands[2]))
    return true;

  cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
			     operands[1], operands[2]);
  ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
  return true;
}
/* Expand a signed/unsigned integral vector conditional move.  */

bool
ix86_expand_int_vcond (rtx operands[])
{
  enum machine_mode data_mode = GET_MODE (operands[0]);
  enum machine_mode mode = GET_MODE (operands[4]);
  enum rtx_code code = GET_CODE (operands[3]);
  bool negate = false;
  rtx x, cop0, cop1;

  cop0 = operands[4];
  cop1 = operands[5];

  /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
     and x < 0 ? 1 : 0 into (unsigned) x >> 31.  */
  if ((code == LT || code == GE)
      && data_mode == mode
      && cop1 == CONST0_RTX (mode)
      && operands[1 + (code == LT)] == CONST0_RTX (data_mode)
      && GET_MODE_SIZE (GET_MODE_INNER (data_mode)) > 1
      && GET_MODE_SIZE (GET_MODE_INNER (data_mode)) <= 8
      && (GET_MODE_SIZE (data_mode) == 16
	  || (TARGET_AVX2 && GET_MODE_SIZE (data_mode) == 32)))
    {
      rtx negop = operands[2 - (code == LT)];
      int shift = GET_MODE_BITSIZE (GET_MODE_INNER (data_mode)) - 1;
      if (negop == CONST1_RTX (data_mode))
	{
	  rtx res = expand_simple_binop (mode, LSHIFTRT, cop0, GEN_INT (shift),
					 operands[0], 1, OPTAB_DIRECT);
	  if (res != operands[0])
	    emit_move_insn (operands[0], res);
	  return true;
	}
      else if (GET_MODE_INNER (data_mode) != DImode
	       && vector_all_ones_operand (negop, data_mode))
	{
	  rtx res = expand_simple_binop (mode, ASHIFTRT, cop0, GEN_INT (shift),
					 operands[0], 0, OPTAB_DIRECT);
	  if (res != operands[0])
	    emit_move_insn (operands[0], res);
	  return true;
	}
    }
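  /* Sketch of the shift trick above (illustrative only): for V4SImode,
     "x < 0 ? -1 : 0" is the sign bit smeared across each element,

	psrad	$31, x		; arithmetic shift: -1 or 0 per element

     and "x < 0 ? 1 : 0" is the sign bit itself,

	psrld	$31, x		; logical shift: 1 or 0 per element

     so no vector comparison instruction is needed at all.  */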
  if (!nonimmediate_operand (cop1, mode))
    cop1 = force_reg (mode, cop1);
  if (!general_operand (operands[1], data_mode))
    operands[1] = force_reg (data_mode, operands[1]);
  if (!general_operand (operands[2], data_mode))
    operands[2] = force_reg (data_mode, operands[2]);

  /* XOP supports all of the comparisons on all 128-bit vector int types.  */
  if (TARGET_XOP
      && (mode == V16QImode || mode == V8HImode
	  || mode == V4SImode || mode == V2DImode))
    ;
  else
    {
      /* Canonicalize the comparison to EQ, GT, GTU.  */
      switch (code)
	{
	case EQ:
	case GT:
	case GTU:
	  break;

	case NE:
	case LE:
	case LEU:
	  code = reverse_condition (code);
	  negate = true;
	  break;

	case GE:
	case GEU:
	  code = reverse_condition (code);
	  negate = true;
	  /* FALLTHRU */

	case LT:
	case LTU:
	  code = swap_condition (code);
	  x = cop0, cop0 = cop1, cop1 = x;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Only SSE4.1/SSE4.2 supports V2DImode.  */
      if (mode == V2DImode)
	{
	  switch (code)
	    {
	    case EQ:
	      /* SSE4.1 supports EQ.  */
	      if (!TARGET_SSE4_1)
		return false;
	      break;

	    case GT:
	    case GTU:
	      /* SSE4.2 supports GT/GTU.  */
	      if (!TARGET_SSE4_2)
		return false;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Unsigned parallel compare is not supported by the hardware.
	 Play some tricks to turn this into a signed comparison
	 against 0.  */
      if (code == GTU)
	{
	  cop0 = force_reg (mode, cop0);

	  switch (mode)
	    {
	    case V8SImode:
	    case V4DImode:
	    case V4SImode:
	    case V2DImode:
	      {
		rtx t1, t2, mask;
		rtx (*gen_sub3) (rtx, rtx, rtx);

		switch (mode)
		  {
		  case V8SImode: gen_sub3 = gen_subv8si3; break;
		  case V4DImode: gen_sub3 = gen_subv4di3; break;
		  case V4SImode: gen_sub3 = gen_subv4si3; break;
		  case V2DImode: gen_sub3 = gen_subv2di3; break;
		  default:
		    gcc_unreachable ();
		  }
		/* Subtract (-(INT MAX) - 1) from both operands to make
		   them signed.  */
		mask = ix86_build_signbit_mask (mode, true, false);
		t1 = gen_reg_rtx (mode);
		emit_insn (gen_sub3 (t1, cop0, mask));

		t2 = gen_reg_rtx (mode);
		emit_insn (gen_sub3 (t2, cop1, mask));

		cop0 = t1;
		cop1 = t2;
		code = GT;
	      }
	      break;
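	    /* The bias trick above in scalar terms (illustrative only):
	       for 32-bit elements, (unsigned) a > (unsigned) b holds exactly
	       when (signed) (a - 0x80000000) > (signed) (b - 0x80000000),
	       because subtracting the sign-bit constant maps the unsigned
	       range onto the signed range monotonically.  That lets
	       psubd + pcmpgtd stand in for the missing unsigned compare.  */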
	    case V32QImode:
	    case V16HImode:
	    case V16QImode:
	    case V8HImode:
	      /* Perform a parallel unsigned saturating subtraction.  */
	      x = gen_reg_rtx (mode);
	      emit_insn (gen_rtx_SET (VOIDmode, x,
				      gen_rtx_US_MINUS (mode, cop0, cop1)));

	      cop0 = x;
	      cop1 = CONST0_RTX (mode);
	      code = EQ;
	      negate = !negate;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* Allow the comparison to be done in one mode, but the movcc to
     happen in another mode.  */
  if (data_mode == mode)
    {
      x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
			       operands[1+negate], operands[2-negate]);
    }
  else
    {
      gcc_assert (GET_MODE_SIZE (data_mode) == GET_MODE_SIZE (mode));
      x = ix86_expand_sse_cmp (gen_lowpart (mode, operands[0]),
			       code, cop0, cop1,
			       operands[1+negate], operands[2-negate]);
      x = gen_lowpart (data_mode, x);
    }

  ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
			 operands[2-negate]);
  return true;
}
/* Expand a variable vector permutation.  */

void
ix86_expand_vec_perm (rtx operands[])
{
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx mask = operands[3];
  rtx t1, t2, t3, t4, vt, vt2, vec[32];
  enum machine_mode mode = GET_MODE (op0);
  enum machine_mode maskmode = GET_MODE (mask);
  int w, e, i;
  bool one_operand_shuffle = rtx_equal_p (op0, op1);

  /* Number of elements in the vector.  */
  w = GET_MODE_NUNITS (mode);
  e = GET_MODE_UNIT_SIZE (mode);
  gcc_assert (w <= 32);

  if (TARGET_AVX2)
    {
      if (mode == V4DImode || mode == V4DFmode || mode == V16HImode)
	{
	  /* Unfortunately, the VPERMQ and VPERMPD instructions only support
	     a constant shuffle operand.  With a tiny bit of effort we can
	     use VPERMD instead.  A re-interpretation stall for V4DFmode is
	     unfortunate but there's no avoiding it.
	     Similarly for V16HImode we don't have instructions for variable
	     shuffling, while for V32QImode we can use vpshufb; vpshufb;
	     vpermq; vpor after preparing suitable masks.  */

	  if (mode == V16HImode)
	    {
	      maskmode = mode = V32QImode;
	      w = 32;
	      e = 1;
	    }
	  else
	    {
	      maskmode = mode = V8SImode;
	      w = 8;
	      e = 4;
	    }
	  t1 = gen_reg_rtx (maskmode);

	  /* Replicate the low bits of the V4DImode mask into V8SImode:
	       mask = { A B C D }
	       t1 = { A A B B C C D D }.  */
	  for (i = 0; i < w / 2; ++i)
	    vec[i*2 + 1] = vec[i*2] = GEN_INT (i * 2);
	  vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
	  vt = force_reg (maskmode, vt);
	  mask = gen_lowpart (maskmode, mask);
	  if (maskmode == V8SImode)
	    emit_insn (gen_avx2_permvarv8si (t1, mask, vt));
	  else
	    emit_insn (gen_avx2_pshufbv32qi3 (t1, mask, vt));

	  /* Multiply the shuffle indices by two.  */
	  t1 = expand_simple_binop (maskmode, PLUS, t1, t1, t1, 1,
				    OPTAB_DIRECT);

	  /* Add one to the odd shuffle indices:
		t1 = { A*2, A*2+1, B*2, B*2+1, ... }.  */
	  for (i = 0; i < w / 2; ++i)
	    {
	      vec[i * 2] = const0_rtx;
	      vec[i * 2 + 1] = const1_rtx;
	    }
	  vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
	  vt = force_const_mem (maskmode, vt);
	  t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1,
				    OPTAB_DIRECT);

	  /* Continue as if V8SImode (resp. V32QImode) was used initially.  */
	  operands[3] = mask = t1;
	  target = gen_lowpart (mode, target);
	  op0 = gen_lowpart (mode, op0);
	  op1 = gen_lowpart (mode, op1);
	}

      switch (mode)
	{
	case V8SImode:
	  /* The VPERMD and VPERMPS instructions already properly ignore
	     the high bits of the shuffle elements.  No need for us to
	     perform an AND ourselves.  */
	  if (one_operand_shuffle)
	    emit_insn (gen_avx2_permvarv8si (target, op0, mask));
	  else
	    {
	      t1 = gen_reg_rtx (V8SImode);
	      t2 = gen_reg_rtx (V8SImode);
	      emit_insn (gen_avx2_permvarv8si (t1, op0, mask));
	      emit_insn (gen_avx2_permvarv8si (t2, op1, mask));
	      goto merge_two;
	    }
	  return;

	case V8SFmode:
	  mask = gen_lowpart (V8SFmode, mask);
	  if (one_operand_shuffle)
	    emit_insn (gen_avx2_permvarv8sf (target, op0, mask));
	  else
	    {
	      t1 = gen_reg_rtx (V8SFmode);
	      t2 = gen_reg_rtx (V8SFmode);
	      emit_insn (gen_avx2_permvarv8sf (t1, op0, mask));
	      emit_insn (gen_avx2_permvarv8sf (t2, op1, mask));
	      goto merge_two;
	    }
	  return;

	case V4SImode:
	  /* By combining the two 128-bit input vectors into one 256-bit
	     input vector, we can use VPERMD and VPERMPS for the full
	     two-operand shuffle.  */
	  t1 = gen_reg_rtx (V8SImode);
	  t2 = gen_reg_rtx (V8SImode);
	  emit_insn (gen_avx_vec_concatv8si (t1, op0, op1));
	  emit_insn (gen_avx_vec_concatv8si (t2, mask, mask));
	  emit_insn (gen_avx2_permvarv8si (t1, t1, t2));
	  emit_insn (gen_avx_vextractf128v8si (target, t1, const0_rtx));
	  return;

	case V4SFmode:
	  t1 = gen_reg_rtx (V8SFmode);
	  t2 = gen_reg_rtx (V8SImode);
	  mask = gen_lowpart (V4SImode, mask);
	  emit_insn (gen_avx_vec_concatv8sf (t1, op0, op1));
	  emit_insn (gen_avx_vec_concatv8si (t2, mask, mask));
	  emit_insn (gen_avx2_permvarv8sf (t1, t1, t2));
	  emit_insn (gen_avx_vextractf128v8sf (target, t1, const0_rtx));
	  return;

	case V32QImode:
	  t1 = gen_reg_rtx (V32QImode);
	  t2 = gen_reg_rtx (V32QImode);
	  t3 = gen_reg_rtx (V32QImode);
	  vt2 = GEN_INT (128);
	  for (i = 0; i < 32; i++)
	    vec[i] = vt2;
	  vt = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, vec));
	  vt = force_reg (V32QImode, vt);
	  for (i = 0; i < 32; i++)
	    vec[i] = i < 16 ? vt2 : const0_rtx;
	  vt2 = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, vec));
	  vt2 = force_reg (V32QImode, vt2);
	  /* From mask create two adjusted masks, which contain the same
	     bits as mask in the low 7 bits of each vector element.
	     The first mask will have the most significant bit clear
	     if it requests element from the same 128-bit lane
	     and MSB set if it requests element from the other 128-bit lane.
	     The second mask will have the opposite values of the MSB,
	     and additionally will have its 128-bit lanes swapped.
	     E.g. { 07 12 1e 09 ... | 17 19 05 1f ... } mask vector will have
	     t1   { 07 92 9e 09 ... | 17 19 85 1f ... } and
	     t3   { 97 99 05 9f ... | 87 12 1e 89 ... } where each ...
	     stands for the other 12 bytes.  */
	  /* The bit whether element is from the same lane or the other
	     lane is bit 4, so shift it up by 3 to the MSB position.  */
	  emit_insn (gen_ashlv4di3 (gen_lowpart (V4DImode, t1),
				    gen_lowpart (V4DImode, mask),
				    GEN_INT (3)));
	  /* Clear MSB bits from the mask just in case it had them set.  */
	  emit_insn (gen_avx2_andnotv32qi3 (t2, vt, mask));
	  /* After this t1 will have MSB set for elements from other lane.  */
	  emit_insn (gen_xorv32qi3 (t1, t1, vt2));
	  /* Clear bits other than MSB.  */
	  emit_insn (gen_andv32qi3 (t1, t1, vt));
	  /* Or in the lower bits from mask into t3.  */
	  emit_insn (gen_iorv32qi3 (t3, t1, t2));
	  /* And invert MSB bits in t1, so MSB is set for elements from the
	     same lane.  */
	  emit_insn (gen_xorv32qi3 (t1, t1, vt));
	  /* Swap 128-bit lanes in t3.  */
	  emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
					  gen_lowpart (V4DImode, t3),
					  const2_rtx, GEN_INT (3),
					  const0_rtx, const1_rtx));
	  /* And or in the lower bits from mask into t1.  */
	  emit_insn (gen_iorv32qi3 (t1, t1, t2));
	  if (one_operand_shuffle)
	    {
	      /* Each of these shuffles will put 0s in places where
		 element from the other 128-bit lane is needed, otherwise
		 will shuffle in the requested value.  */
	      emit_insn (gen_avx2_pshufbv32qi3 (t3, op0, t3));
	      emit_insn (gen_avx2_pshufbv32qi3 (t1, op0, t1));
	      /* For t3 the 128-bit lanes are swapped again.  */
	      emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
					      gen_lowpart (V4DImode, t3),
					      const2_rtx, GEN_INT (3),
					      const0_rtx, const1_rtx));
	      /* And oring both together leads to the result.  */
	      emit_insn (gen_iorv32qi3 (target, t1, t3));
	      return;
	    }
	  else
	    {
	      t4 = gen_reg_rtx (V32QImode);
	      /* Similarly to the above one_operand_shuffle code,
		 just repeated twice for each operand.  The merge_two:
		 code will merge the two results together.  */
	      emit_insn (gen_avx2_pshufbv32qi3 (t4, op0, t3));
	      emit_insn (gen_avx2_pshufbv32qi3 (t3, op1, t3));
	      emit_insn (gen_avx2_pshufbv32qi3 (t2, op0, t1));
	      emit_insn (gen_avx2_pshufbv32qi3 (t1, op1, t1));
	      emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t4),
					      gen_lowpart (V4DImode, t4),
					      const2_rtx, GEN_INT (3),
					      const0_rtx, const1_rtx));
	      emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
					      gen_lowpart (V4DImode, t3),
					      const2_rtx, GEN_INT (3),
					      const0_rtx, const1_rtx));
	      emit_insn (gen_iorv32qi3 (t4, t2, t4));
	      emit_insn (gen_iorv32qi3 (t3, t1, t3));
	      t1 = t4;
	      t2 = t3;
	      goto merge_two;
	    }

	default:
	  gcc_assert (GET_MODE_SIZE (mode) <= 16);
	  break;
	}
    }

  if (TARGET_XOP)
    {
      /* The XOP VPPERM insn supports three inputs.  By ignoring the
	 one_operand_shuffle special case, we avoid creating another
	 set of constant vectors in memory.  */
      one_operand_shuffle = false;

      /* mask = mask & {2*w-1, ...} */
      vt = GEN_INT (2*w - 1);
    }
  else
    {
      /* mask = mask & {w-1, ...} */
      vt = GEN_INT (w - 1);
    }

  for (i = 0; i < w; i++)
    vec[i] = vt;
  vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
  mask = expand_simple_binop (maskmode, AND, mask, vt,
			      NULL_RTX, 0, OPTAB_DIRECT);

  /* For non-QImode operations, convert the word permutation control
     into a byte permutation control.  */
  if (mode != V16QImode)
    {
      mask = expand_simple_binop (maskmode, ASHIFT, mask,
				  GEN_INT (exact_log2 (e)),
				  NULL_RTX, 0, OPTAB_DIRECT);

      /* Convert mask to vector of chars.  */
      mask = force_reg (V16QImode, gen_lowpart (V16QImode, mask));

      /* Replicate each of the input bytes into byte positions:
	 (v2di) --> {0,0,0,0,0,0,0,0, 8,8,8,8,8,8,8,8}
	 (v4si) --> {0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12}
	 (v8hi) --> {0,0, 2,2, 4,4, 6,6, ...}.  */
      for (i = 0; i < 16; ++i)
	vec[i] = GEN_INT (i/e * e);
      vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
      vt = force_const_mem (V16QImode, vt);
      if (TARGET_XOP)
	emit_insn (gen_xop_pperm (mask, mask, mask, vt));
      else
	emit_insn (gen_ssse3_pshufbv16qi3 (mask, mask, vt));

      /* Convert it into the byte positions by doing
	 mask = mask + {0,1,..,16/w, 0,1,..,16/w, ...}  */
      for (i = 0; i < 16; ++i)
	vec[i] = GEN_INT (i % e);
      vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
      vt = force_const_mem (V16QImode, vt);
      emit_insn (gen_addv16qi3 (mask, mask, vt));
    }
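  /* Worked example of the control conversion above (illustrative only):
     for V4SImode (e == 4), a word index k must become the four byte
     indices 4k, 4k+1, 4k+2, 4k+3.  The shift multiplies each index by 4,
     the byte shuffle replicates it into all four byte slots of its
     element, and the final add of {0,1,2,3, 0,1,2,3, ...} produces the
     byte offsets consumed by pshufb/vpperm.  */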
  /* The actual shuffle operations all operate on V16QImode.  */
  op0 = gen_lowpart (V16QImode, op0);
  op1 = gen_lowpart (V16QImode, op1);
  target = gen_lowpart (V16QImode, target);

  if (TARGET_XOP)
    {
      emit_insn (gen_xop_pperm (target, op0, op1, mask));
    }
  else if (one_operand_shuffle)
    {
      emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, mask));
    }
  else
    {
      rtx xops[6];
      bool ok;

      /* Shuffle the two input vectors independently.  */
      t1 = gen_reg_rtx (V16QImode);
      t2 = gen_reg_rtx (V16QImode);
      emit_insn (gen_ssse3_pshufbv16qi3 (t1, op0, mask));
      emit_insn (gen_ssse3_pshufbv16qi3 (t2, op1, mask));

 merge_two:
      /* Then merge them together.  The key is whether any given control
	 element contained a bit set that indicates the second word.  */
      mask = operands[3];
      vt = GEN_INT (w);
      if (maskmode == V2DImode && !TARGET_SSE4_1)
	{
	  /* Without SSE4.1, we don't have V2DImode EQ.  Perform one
	     more shuffle to convert the V2DI input mask into a V4SI
	     input mask.  At which point the masking that expand_int_vcond
	     will work as desired.  */
	  rtx t3 = gen_reg_rtx (V4SImode);
	  emit_insn (gen_sse2_pshufd_1 (t3, gen_lowpart (V4SImode, mask),
					const0_rtx, const0_rtx,
					const2_rtx, const2_rtx));
	  mask = t3;
	  maskmode = V4SImode;
	  e = w = 4;
	}

      for (i = 0; i < w; i++)
	vec[i] = vt;
      vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
      vt = force_reg (maskmode, vt);
      mask = expand_simple_binop (maskmode, AND, mask, vt,
				  NULL_RTX, 0, OPTAB_DIRECT);

      xops[0] = gen_lowpart (mode, operands[0]);
      xops[1] = gen_lowpart (mode, t2);
      xops[2] = gen_lowpart (mode, t1);
      xops[3] = gen_rtx_EQ (maskmode, mask, vt);
      xops[4] = mask;
      xops[5] = vt;

      ok = ix86_expand_int_vcond (xops);
      gcc_assert (ok);
    }
}
/* Unpack OP[1] into the next wider integer vector type.  UNSIGNED_P is
   true if we should do zero extension, else sign extension.  HIGH_P is
   true if we want the N/2 high elements, else the low elements.  */

void
ix86_expand_sse_unpack (rtx dest, rtx src, bool unsigned_p, bool high_p)
{
  enum machine_mode imode = GET_MODE (src);
  rtx tmp;

  if (TARGET_SSE4_1)
    {
      rtx (*unpack)(rtx, rtx);
      rtx (*extract)(rtx, rtx) = NULL;
      enum machine_mode halfmode = BLKmode;

      switch (imode)
	{
	case V32QImode:
	  if (unsigned_p)
	    unpack = gen_avx2_zero_extendv16qiv16hi2;
	  else
	    unpack = gen_avx2_sign_extendv16qiv16hi2;
	  halfmode = V16QImode;
	  extract
	    = high_p ? gen_vec_extract_hi_v32qi : gen_vec_extract_lo_v32qi;
	  break;
	case V16HImode:
	  if (unsigned_p)
	    unpack = gen_avx2_zero_extendv8hiv8si2;
	  else
	    unpack = gen_avx2_sign_extendv8hiv8si2;
	  halfmode = V8HImode;
	  extract
	    = high_p ? gen_vec_extract_hi_v16hi : gen_vec_extract_lo_v16hi;
	  break;
	case V8SImode:
	  if (unsigned_p)
	    unpack = gen_avx2_zero_extendv4siv4di2;
	  else
	    unpack = gen_avx2_sign_extendv4siv4di2;
	  halfmode = V4SImode;
	  extract
	    = high_p ? gen_vec_extract_hi_v8si : gen_vec_extract_lo_v8si;
	  break;
	case V16QImode:
	  if (unsigned_p)
	    unpack = gen_sse4_1_zero_extendv8qiv8hi2;
	  else
	    unpack = gen_sse4_1_sign_extendv8qiv8hi2;
	  break;
	case V8HImode:
	  if (unsigned_p)
	    unpack = gen_sse4_1_zero_extendv4hiv4si2;
	  else
	    unpack = gen_sse4_1_sign_extendv4hiv4si2;
	  break;
	case V4SImode:
	  if (unsigned_p)
	    unpack = gen_sse4_1_zero_extendv2siv2di2;
	  else
	    unpack = gen_sse4_1_sign_extendv2siv2di2;
	  break;
	default:
	  gcc_unreachable ();
	}

      if (GET_MODE_SIZE (imode) == 32)
	{
	  tmp = gen_reg_rtx (halfmode);
	  emit_insn (extract (tmp, src));
	}
      else if (high_p)
	{
	  /* Shift higher 8 bytes to lower 8 bytes.  */
	  tmp = gen_reg_rtx (imode);
	  emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, tmp),
					 gen_lowpart (V1TImode, src),
					 GEN_INT (64)));
	}
      else
	tmp = src;

      emit_insn (unpack (dest, tmp));
    }
  else
    {
      rtx (*unpack)(rtx, rtx, rtx);

      switch (imode)
	{
	case V16QImode:
	  if (high_p)
	    unpack = gen_vec_interleave_highv16qi;
	  else
	    unpack = gen_vec_interleave_lowv16qi;
	  break;
	case V8HImode:
	  if (high_p)
	    unpack = gen_vec_interleave_highv8hi;
	  else
	    unpack = gen_vec_interleave_lowv8hi;
	  break;
	case V4SImode:
	  if (high_p)
	    unpack = gen_vec_interleave_highv4si;
	  else
	    unpack = gen_vec_interleave_lowv4si;
	  break;
	default:
	  gcc_unreachable ();
	}

      if (unsigned_p)
	tmp = force_reg (imode, CONST0_RTX (imode));
      else
	tmp = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
				   src, pc_rtx, pc_rtx);

      emit_insn (unpack (gen_lowpart (imode, dest), src, tmp));
    }
}
/* Expand conditional increment or decrement using adc/sbb instructions.
   The default case using setcc followed by the conditional move can be
   done by generic code.  */
bool
ix86_expand_int_addcc (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx flags;
  rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
  rtx compare_op;
  rtx val = const0_rtx;
  bool fpcmp = false;
  enum machine_mode mode;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (operands[3] != const1_rtx
      && operands[3] != constm1_rtx)
    return false;
  if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
    return false;
  code = GET_CODE (compare_op);

  flags = XEXP (compare_op, 0);

  if (GET_MODE (flags) == CCFPmode
      || GET_MODE (flags) == CCFPUmode)
    {
      fpcmp = true;
      code = ix86_fp_compare_code_to_integer (code);
    }

  if (code != LTU)
    {
      val = constm1_rtx;
      if (fpcmp)
	PUT_CODE (compare_op,
		  reverse_condition_maybe_unordered
		    (GET_CODE (compare_op)));
      else
	PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
    }

  mode = GET_MODE (operands[0]);

  /* Construct either adc or sbb insn.  */
  if ((code == LTU) == (operands[3] == constm1_rtx))
    {
      switch (mode)
	{
	case QImode:
	  insn = gen_subqi3_carry;
	  break;
	case HImode:
	  insn = gen_subhi3_carry;
	  break;
	case SImode:
	  insn = gen_subsi3_carry;
	  break;
	case DImode:
	  insn = gen_subdi3_carry;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (mode)
	{
	case QImode:
	  insn = gen_addqi3_carry;
	  break;
	case HImode:
	  insn = gen_addhi3_carry;
	  break;
	case SImode:
	  insn = gen_addsi3_carry;
	  break;
	case DImode:
	  insn = gen_adddi3_carry;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  emit_insn (insn (operands[0], operands[2], val, flags, compare_op));

  return true;
}
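/* Illustrative instance (not part of the original sources): for
   "dest = a + (b < c)" with unsigned operands this collapses to

	cmpl	c, b		; carry set iff b < c
	adcl	$0, a		; a += carry

   i.e. the setcc/conditional-move sequence becomes a single
   add-with-carry; the decrement case uses sbb analogously.  */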
/* Split operands 0 and 1 into half-mode parts.  Similar to split_double_mode,
   but works for floating point parameters and nonoffsetable memories.
   For pushes, it returns just stack offsets; the values will be saved
   in the right order.  Maximally four parts are generated.  */

static int
ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
{
  int size;

  if (!TARGET_64BIT)
    size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
  else
    size = (GET_MODE_SIZE (mode) + 4) / 8;

  gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
  gcc_assert (size >= 2 && size <= 4);

  /* Optimize constant pool reference to immediates.  This is used by fp
     moves, that force all constants to memory to allow combining.  */
  if (MEM_P (operand) && MEM_READONLY_P (operand))
    {
      rtx tmp = maybe_get_pool_constant (operand);
      if (tmp)
	operand = tmp;
    }

  if (MEM_P (operand) && !offsettable_memref_p (operand))
    {
      /* The only non-offsetable memories we handle are pushes.  */
      int ok = push_operand (operand, VOIDmode);

      gcc_assert (ok);

      operand = copy_rtx (operand);
      PUT_MODE (operand, word_mode);
      parts[0] = parts[1] = parts[2] = parts[3] = operand;
      return size;
    }

  if (GET_CODE (operand) == CONST_VECTOR)
    {
      enum machine_mode imode = int_mode_for_mode (mode);
      /* Caution: if we looked through a constant pool memory above,
	 the operand may actually have a different mode now.  That's
	 ok, since we want to pun this all the way back to an integer.  */
      operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
      gcc_assert (operand != NULL);
      mode = imode;
    }

  if (!TARGET_64BIT)
    {
      if (mode == DImode)
	split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
      else
	{
	  int i;

	  if (REG_P (operand))
	    {
	      gcc_assert (reload_completed);
	      for (i = 0; i < size; i++)
		parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
	    }
	  else if (offsettable_memref_p (operand))
	    {
	      operand = adjust_address (operand, SImode, 0);
	      parts[0] = operand;
	      for (i = 1; i < size; i++)
		parts[i] = adjust_address (operand, SImode, 4 * i);
	    }
	  else if (GET_CODE (operand) == CONST_DOUBLE)
	    {
	      REAL_VALUE_TYPE r;
	      long l[4];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
	      switch (mode)
		{
		case TFmode:
		  real_to_target (l, &r, mode);
		  parts[3] = gen_int_mode (l[3], SImode);
		  parts[2] = gen_int_mode (l[2], SImode);
		  break;
		case XFmode:
		  /* We can't use REAL_VALUE_TO_TARGET_LONG_DOUBLE since
		     long double may not be 80-bit.  */
		  real_to_target (l, &r, mode);
		  parts[2] = gen_int_mode (l[2], SImode);
		  break;
		case DFmode:
		  REAL_VALUE_TO_TARGET_DOUBLE (r, l);
		  break;
		default:
		  gcc_unreachable ();
		}
	      parts[1] = gen_int_mode (l[1], SImode);
	      parts[0] = gen_int_mode (l[0], SImode);
	    }
	  else
	    gcc_unreachable ();
	}
    }
  else
    {
      if (mode == TImode)
	split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
      if (mode == XFmode || mode == TFmode)
	{
	  enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
	  if (REG_P (operand))
	    {
	      gcc_assert (reload_completed);
	      parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
	      parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
	    }
	  else if (offsettable_memref_p (operand))
	    {
	      operand = adjust_address (operand, DImode, 0);
	      parts[0] = operand;
	      parts[1] = adjust_address (operand, upper_mode, 8);
	    }
	  else if (GET_CODE (operand) == CONST_DOUBLE)
	    {
	      REAL_VALUE_TYPE r;
	      long l[4];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
	      real_to_target (l, &r, mode);

	      /* Do not use shift by 32 to avoid warning on 32bit systems.  */
	      if (HOST_BITS_PER_WIDE_INT >= 64)
		parts[0]
		  = gen_int_mode
		      ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
		       + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
		       DImode);
	      else
		parts[0] = immed_double_const (l[0], l[1], DImode);

	      if (upper_mode == SImode)
		parts[1] = gen_int_mode (l[2], SImode);
	      else if (HOST_BITS_PER_WIDE_INT >= 64)
		parts[1]
		  = gen_int_mode
		      ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
		       + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
		       DImode);
	      else
		parts[1] = immed_double_const (l[2], l[3], DImode);
	    }
	  else
	    gcc_unreachable ();
	}
    }

  return size;
}
/* Emit insns to perform a move or push of DI, DF, XF, and TF values.
   Return false when normal moves are needed; true when all required
   insns have been emitted.  Operands 2-4 contain the input values
   in the correct order; operands 5-7 contain the output values.  */

void
ix86_split_long_move (rtx operands[])
{
  rtx part[2][4];
  int nparts, i, j;
  int push = 0;
  int collisions = 0;
  enum machine_mode mode = GET_MODE (operands[0]);
  bool collisionparts[4];

  /* The DFmode expanders may ask us to move double.
     For 64bit target this is single move.  By hiding the fact
     here we simplify i386.md splitters.  */
  if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
    {
      /* Optimize constant pool reference to immediates.  This is used by
	 fp moves, that force all constants to memory to allow combining.  */

      if (MEM_P (operands[1])
	  && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
	operands[1] = get_pool_constant (XEXP (operands[1], 0));
      if (push_operand (operands[0], VOIDmode))
	{
	  operands[0] = copy_rtx (operands[0]);
	  PUT_MODE (operands[0], word_mode);
	}
      else
	operands[0] = gen_lowpart (DImode, operands[0]);
      operands[1] = gen_lowpart (DImode, operands[1]);
      emit_move_insn (operands[0], operands[1]);
      return;
    }

  /* The only non-offsettable memory we handle is push.  */
  if (push_operand (operands[0], VOIDmode))
    push = 1;
  else
    gcc_assert (!MEM_P (operands[0])
		|| offsettable_memref_p (operands[0]));

  nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
  ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));

  /* When emitting push, take care for source operands on the stack.  */
  if (push && MEM_P (operands[1])
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    {
      rtx src_base = XEXP (part[1][nparts - 1], 0);

      /* Compensate for the stack decrement by 4.  */
      if (!TARGET_64BIT && nparts == 3
	  && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
	src_base = plus_constant (Pmode, src_base, 4);

      /* src_base refers to the stack pointer and is
	 automatically decreased by emitted push.  */
      for (i = 0; i < nparts; i++)
	part[1][i] = change_address (part[1][i],
				     GET_MODE (part[1][i]), src_base);
    }

  /* We need to do copy in the right order in case an address register
     of the source overlaps the destination.  */
  if (REG_P (part[0][0]) && MEM_P (part[1][0]))
    {
      rtx tmp;

      for (i = 0; i < nparts; i++)
	{
	  collisionparts[i]
	    = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
	  if (collisionparts[i])
	    collisions++;
	}

      /* Collision in the middle part can be handled by reordering.  */
      if (collisions == 1 && nparts == 3 && collisionparts[1])
	{
	  tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
	  tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
	}
      else if (collisions == 1
	       && nparts == 4
	       && (collisionparts[1] || collisionparts[2]))
	{
	  if (collisionparts[1])
	    {
	      tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
	      tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
	    }
	  else
	    {
	      tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
	      tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
	    }
	}

      /* If there are more collisions, we can't handle it by reordering.
	 Do an lea to the last part and use only one colliding move.  */
      else if (collisions > 1)
	{
	  rtx base;

	  collisions = 1;

	  base = part[0][nparts - 1];

	  /* Handle the case when the last part isn't valid for lea.
	     Happens in 64-bit mode storing the 12-byte XFmode.  */
	  if (GET_MODE (base) != Pmode)
	    base = gen_rtx_REG (Pmode, REGNO (base));

	  emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
	  part[1][0] = replace_equiv_address (part[1][0], base);
	  for (i = 1; i < nparts; i++)
	    {
	      tmp = plus_constant (Pmode, base, UNITS_PER_WORD * i);
	      part[1][i] = replace_equiv_address (part[1][i], tmp);
	    }
	}
    }

  if (push)
    {
      if (!TARGET_64BIT)
	{
	  if (nparts == 3)
	    {
	      if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
		emit_insn (ix86_gen_add3 (stack_pointer_rtx,
					  stack_pointer_rtx, GEN_INT (-4)));
	      emit_move_insn (part[0][2], part[1][2]);
	    }
	  else if (nparts == 4)
	    {
	      emit_move_insn (part[0][3], part[1][3]);
	      emit_move_insn (part[0][2], part[1][2]);
	    }
	}
      else
	{
	  /* In 64bit mode we don't have 32bit push available.  In case this is
	     register, it is OK - we will just use larger counterpart.  We also
	     retype memory - these come from an attempt to avoid REX prefix on
	     moving of second half of TFmode value.  */
	  if (GET_MODE (part[1][1]) == SImode)
	    {
	      switch (GET_CODE (part[1][1]))
		{
		case MEM:
		  part[1][1] = adjust_address (part[1][1], DImode, 0);
		  break;

		case REG:
		  part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
		  break;

		default:
		  gcc_unreachable ();
		}

	      if (GET_MODE (part[1][0]) == SImode)
		part[1][0] = part[1][1];
	    }
	}
      emit_move_insn (part[0][1], part[1][1]);
      emit_move_insn (part[0][0], part[1][0]);
      return;
    }

  /* Choose correct order to not overwrite the source before it is copied.  */
  if ((REG_P (part[0][0])
       && REG_P (part[1][1])
       && (REGNO (part[0][0]) == REGNO (part[1][1])
	   || (nparts == 3
	       && REGNO (part[0][0]) == REGNO (part[1][2]))
	   || (nparts == 4
	       && REGNO (part[0][0]) == REGNO (part[1][3]))))
      || (collisions > 0
	  && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
    {
      for (i = 0, j = nparts - 1; i < nparts; i++, j--)
	{
	  operands[2 + i] = part[0][j];
	  operands[6 + i] = part[1][j];
	}
    }
  else
    {
      for (i = 0; i < nparts; i++)
	{
	  operands[2 + i] = part[0][i];
	  operands[6 + i] = part[1][i];
	}
    }

  /* If optimizing for size, attempt to locally unCSE nonzero constants.  */
  if (optimize_insn_for_size_p ())
    {
      for (j = 0; j < nparts - 1; j++)
	if (CONST_INT_P (operands[6 + j])
	    && operands[6 + j] != const0_rtx
	    && REG_P (operands[2 + j]))
	  for (i = j; i < nparts - 1; i++)
	    if (CONST_INT_P (operands[7 + i])
		&& INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
	      operands[7 + i] = operands[2 + j];
    }

  for (i = 0; i < nparts; i++)
    emit_move_insn (operands[2 + i], operands[6 + i]);
}
/* Helper function of ix86_split_ashl used to generate an SImode/DImode
   left shift by a constant, either using a single shift or
   a sequence of add instructions.  */

static void
ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
{
  rtx (*insn)(rtx, rtx, rtx);

  if (count == 1
      || (count * ix86_cost->add <= ix86_cost->shift_const
	  && !optimize_insn_for_size_p ()))
    {
      insn = mode == DImode ? gen_addsi3 : gen_adddi3;
      while (count-- > 0)
	emit_insn (insn (operand, operand, operand));
    }
  else
    {
      insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
      emit_insn (insn (operand, operand, GEN_INT (count)));
    }
}
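/* Cost rationale (illustrative note, not from the original sources): when
   COUNT times the tuned add cost stays at or below the constant-shift
   cost, a small left shift such as "x << 2" on one half-word is emitted
   as a chain of self-adds,

	addl	reg, reg
	addl	reg, reg

   instead of "shll $2, reg"; larger counts, or optimizing for size,
   fall back to the single shift instruction.  */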
void
ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
{
  rtx (*gen_ashl3)(rtx, rtx, rtx);
  rtx (*gen_shld)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;

  rtx low[2], high[2];
  int count;

  if (CONST_INT_P (operands[2]))
    {
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count >= half_width)
	{
	  emit_move_insn (high[0], low[1]);
	  emit_move_insn (low[0], const0_rtx);

	  if (count > half_width)
	    ix86_expand_ashl_const (high[0], count - half_width, mode);
	}
      else
	{
	  gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;

	  if (!rtx_equal_p (operands[0], operands[1]))
	    emit_move_insn (operands[0], operands[1]);

	  emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
	  ix86_expand_ashl_const (low[0], count, mode);
	}
      return;
    }

  split_double_mode (mode, operands, 1, low, high);

  gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;

  if (operands[1] == const1_rtx)
    {
      /* Assuming we've chosen a QImode capable registers, then 1 << N
	 can be done with two 32/64-bit shifts, no branches, no cmoves.  */
      if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
	{
	  rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);

	  ix86_expand_clear (low[0]);
	  ix86_expand_clear (high[0]);
	  emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));

	  d = gen_lowpart (QImode, low[0]);
	  d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
	  s = gen_rtx_EQ (QImode, flags, const0_rtx);
	  emit_insn (gen_rtx_SET (VOIDmode, d, s));

	  d = gen_lowpart (QImode, high[0]);
	  d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
	  s = gen_rtx_NE (QImode, flags, const0_rtx);
	  emit_insn (gen_rtx_SET (VOIDmode, d, s));
	}

      /* Otherwise, we can get the same results by manually performing
	 a bit extract operation on bit 5/6, and then performing the two
	 shifts.  The two methods of getting 0/1 into low/high are exactly
	 the same size.  Avoiding the shift in the bit extract case helps
	 pentium4 a bit; no one else seems to care much either way.  */
      else
	{
	  enum machine_mode half_mode;
	  rtx (*gen_lshr3)(rtx, rtx, rtx);
	  rtx (*gen_and3)(rtx, rtx, rtx);
	  rtx (*gen_xor3)(rtx, rtx, rtx);
	  HOST_WIDE_INT bits;
	  rtx x;

	  if (mode == DImode)
	    {
	      half_mode = SImode;
	      gen_lshr3 = gen_lshrsi3;
	      gen_and3 = gen_andsi3;
	      gen_xor3 = gen_xorsi3;
	      bits = 5;
	    }
	  else
	    {
	      half_mode = DImode;
	      gen_lshr3 = gen_lshrdi3;
	      gen_and3 = gen_anddi3;
	      gen_xor3 = gen_xordi3;
	      bits = 6;
	    }

	  if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
	    x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
	  else
	    x = gen_lowpart (half_mode, operands[2]);
	  emit_insn (gen_rtx_SET (VOIDmode, high[0], x));

	  emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
	  emit_insn (gen_and3 (high[0], high[0], const1_rtx));
	  emit_move_insn (low[0], high[0]);
	  emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
	}

      emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
      emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
      return;
    }

  if (operands[1] == constm1_rtx)
    {
      /* For -1 << N, we can avoid the shld instruction, because we
	 know that we're shifting 0...31/63 ones into a -1.  */
      emit_move_insn (low[0], constm1_rtx);
      if (optimize_insn_for_size_p ())
	emit_move_insn (high[0], low[0]);
      else
	emit_move_insn (high[0], constm1_rtx);
    }
  else
    {
      gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;

      if (!rtx_equal_p (operands[0], operands[1]))
	emit_move_insn (operands[0], operands[1]);

      split_double_mode (mode, operands, 1, low, high);
      emit_insn (gen_shld (high[0], low[0], operands[2]));
    }

  emit_insn (gen_ashl3 (low[0], low[0], operands[2]));

  if (TARGET_CMOVE && scratch)
    {
      rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
	= mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

      ix86_expand_clear (scratch);
      emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
    }
  else
    {
      rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
	= mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;

      emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
    }
}
void
ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
{
  rtx (*gen_ashr3)(rtx, rtx, rtx)
    = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
  rtx (*gen_shrd)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;

  rtx low[2], high[2];
  int count;

  if (CONST_INT_P (operands[2]))
    {
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count == GET_MODE_BITSIZE (mode) - 1)
	{
	  emit_move_insn (high[0], high[1]);
	  emit_insn (gen_ashr3 (high[0], high[0],
				GEN_INT (half_width - 1)));
	  emit_move_insn (low[0], high[0]);
	}
      else if (count >= half_width)
	{
	  emit_move_insn (low[0], high[1]);
	  emit_move_insn (high[0], low[0]);
	  emit_insn (gen_ashr3 (high[0], high[0],
				GEN_INT (half_width - 1)));

	  if (count > half_width)
	    emit_insn (gen_ashr3 (low[0], low[0],
				  GEN_INT (count - half_width)));
	}
      else
	{
	  gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

	  if (!rtx_equal_p (operands[0], operands[1]))
	    emit_move_insn (operands[0], operands[1]);

	  emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
	  emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
	}
    }
  else
    {
      gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

      if (!rtx_equal_p (operands[0], operands[1]))
	emit_move_insn (operands[0], operands[1]);

      split_double_mode (mode, operands, 1, low, high);

      emit_insn (gen_shrd (low[0], high[0], operands[2]));
      emit_insn (gen_ashr3 (high[0], high[0], operands[2]));

      if (TARGET_CMOVE && scratch)
	{
	  rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
	    = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

	  emit_move_insn (scratch, high[0]);
	  emit_insn (gen_ashr3 (scratch, scratch,
				GEN_INT (half_width - 1)));
	  emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
					  scratch));
	}
      else
	{
	  rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
	    = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;

	  emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
	}
    }
}
void
ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
{
  rtx (*gen_lshr3)(rtx, rtx, rtx)
    = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
  rtx (*gen_shrd)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;
  rtx low[2], high[2];
  int count;

  if (CONST_INT_P (operands[2]))
    {
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count >= half_width)
	{
	  emit_move_insn (low[0], high[1]);
	  ix86_expand_clear (high[0]);

	  if (count > half_width)
	    emit_insn (gen_lshr3 (low[0], low[0],
				  GEN_INT (count - half_width)));
	}
      else
	{
	  gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

	  if (!rtx_equal_p (operands[0], operands[1]))
	    emit_move_insn (operands[0], operands[1]);

	  emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
	  emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
	}
    }
  else
    {
      gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

      if (!rtx_equal_p (operands[0], operands[1]))
	emit_move_insn (operands[0], operands[1]);

      split_double_mode (mode, operands, 1, low, high);

      emit_insn (gen_shrd (low[0], high[0], operands[2]));
      emit_insn (gen_lshr3 (high[0], high[0], operands[2]));

      if (TARGET_CMOVE && scratch)
	{
	  rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
	    = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

	  ix86_expand_clear (scratch);
	  emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
					  scratch));
	}
      else
	{
	  rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
	    = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;

	  emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
	}
    }
}
/* Predict the jump instruction just emitted to be taken with
   probability PROB.  */
static void
predict_jump (int prob)
{
  rtx insn = get_last_insn ();
  gcc_assert (JUMP_P (insn));
  add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
}
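
/* For reference: REG_BR_PROB_BASE is 10000, so the callers below annotate
   a branch they expect taken nine times out of ten as
   predict_jump (REG_BR_PROB_BASE * 90 / 100), i.e. probability 9000.  */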
/* Helper function for the string operations below.  Test whether VARIABLE
   is aligned to VALUE bytes, i.e. whether (VARIABLE & VALUE) is zero; if
   it is, jump to the label that is returned.  EPILOGUE is true when the
   test guards epilogue code, which changes the branch prediction.  */
static rtx
ix86_expand_aligntest (rtx variable, int value, bool epilogue)
{
  rtx label = gen_label_rtx ();
  rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
  if (GET_MODE (variable) == DImode)
    emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
  else
    emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
  emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
			   1, label);
  if (epilogue)
    predict_jump (REG_BR_PROB_BASE * 50 / 100);
  else
    predict_jump (REG_BR_PROB_BASE * 90 / 100);
  return label;
}
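
/* A sketch of the intended use, modeled on the epilogue expanders below
   (illustrative only): emit a 4-byte fixup that runs when bit 2 of COUNT
   is set and is jumped over otherwise.

       rtx label = ix86_expand_aligntest (count, 4, true);
       ...emit the 4-byte move here...
       emit_label (label);
       LABEL_NUSES (label) = 1;
 */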
/* Decrease COUNTREG by VALUE.  */
static void
ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
{
  rtx (*gen_add)(rtx, rtx, rtx)
    = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;

  emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
}
/* Zero extend possibly SImode EXP to Pmode register.  */
rtx
ix86_zero_extend_to_Pmode (rtx exp)
{
  if (GET_MODE (exp) != Pmode)
    exp = convert_to_mode (Pmode, exp, 1);
  return force_reg (Pmode, exp);
}
/* Divide COUNTREG by SCALE.  */
static rtx
scale_counter (rtx countreg, int scale)
{
  rtx sc;

  if (scale == 1)
    return countreg;
  if (CONST_INT_P (countreg))
    return GEN_INT (INTVAL (countreg) / scale);
  gcc_assert (REG_P (countreg));

  sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
			    GEN_INT (exact_log2 (scale)),
			    NULL, 1, OPTAB_DIRECT);
  return sc;
}
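
/* Example: scale_counter (GEN_INT (37), 4) folds to GEN_INT (9) at expand
   time; for a register count it instead emits a logical shift right by
   exact_log2 (4) == 2, the word count a 4-byte "rep movs" expects.  */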
/* Return mode for the memcpy/memset loop counter.  Prefer SImode over
   DImode for constant loop counts.  */
static enum machine_mode
counter_mode (rtx count_exp)
{
  if (GET_MODE (count_exp) != VOIDmode)
    return GET_MODE (count_exp);
  if (!CONST_INT_P (count_exp))
    return Pmode;
  if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
    return DImode;
  return SImode;
}
/* When SRCPTR is non-NULL, output simple loop to move memory
   pointer to SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times,
   overall size is COUNT specified in bytes.  When SRCPTR is NULL, output the
   equivalent loop to set memory by VALUE (supposed to be in MODE).

   The size is rounded down to whole number of chunk size moved at once.
   SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info.  */

static void
expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
			       rtx destptr, rtx srcptr, rtx value,
			       rtx count, enum machine_mode mode, int unroll,
			       int expected_size)
{
  rtx out_label, top_label, iter, tmp;
  enum machine_mode iter_mode = counter_mode (count);
  rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
  rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
  rtx size;
  rtx x_addr;
  rtx y_addr;
  int i;

  top_label = gen_label_rtx ();
  out_label = gen_label_rtx ();
  iter = gen_reg_rtx (iter_mode);

  size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
			      NULL, 1, OPTAB_DIRECT);
  /* Those two should combine.  */
  if (piece_size == const1_rtx)
    {
      emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
			       true, out_label);
      predict_jump (REG_BR_PROB_BASE * 10 / 100);
    }
  emit_move_insn (iter, const0_rtx);

  emit_label (top_label);

  tmp = convert_modes (Pmode, iter_mode, iter, true);
  x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
  destmem = change_address (destmem, mode, x_addr);

  if (srcmem)
    {
      y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
      srcmem = change_address (srcmem, mode, y_addr);

      /* When unrolling for chips that reorder memory reads and writes,
	 we can save registers by using single temporary.
	 Also using 4 temporaries is overkill in 32bit mode.  */
      if (!TARGET_64BIT && 0)
	{
	  for (i = 0; i < unroll; i++)
	    {
	      if (i)
		{
		  destmem =
		    adjust_address (copy_rtx (destmem), mode,
				    GET_MODE_SIZE (mode));
		  srcmem =
		    adjust_address (copy_rtx (srcmem), mode,
				    GET_MODE_SIZE (mode));
		}
	      emit_move_insn (destmem, srcmem);
	    }
	}
      else
	{
	  rtx tmpreg[4];
	  gcc_assert (unroll <= 4);
	  for (i = 0; i < unroll; i++)
	    {
	      tmpreg[i] = gen_reg_rtx (mode);
	      if (i)
		srcmem =
		  adjust_address (copy_rtx (srcmem), mode,
				  GET_MODE_SIZE (mode));
	      emit_move_insn (tmpreg[i], srcmem);
	    }
	  for (i = 0; i < unroll; i++)
	    {
	      if (i)
		destmem =
		  adjust_address (copy_rtx (destmem), mode,
				  GET_MODE_SIZE (mode));
	      emit_move_insn (destmem, tmpreg[i]);
	    }
	}
    }
  else
    for (i = 0; i < unroll; i++)
      {
	if (i)
	  destmem =
	    adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
	emit_move_insn (destmem, value);
      }

  tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
			     true, OPTAB_LIB_WIDEN);
  if (tmp != iter)
    emit_move_insn (iter, tmp);

  emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
			   true, top_label);
  if (expected_size != -1)
    {
      expected_size /= GET_MODE_SIZE (mode) * unroll;
      if (expected_size == 0)
	predict_jump (0);
      else if (expected_size > REG_BR_PROB_BASE)
	predict_jump (REG_BR_PROB_BASE - 1);
      else
	predict_jump (REG_BR_PROB_BASE
		      - (REG_BR_PROB_BASE + expected_size / 2)
			/ expected_size);
    }
  else
    predict_jump (REG_BR_PROB_BASE * 80 / 100);
  iter = ix86_zero_extend_to_Pmode (iter);
  tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
			     true, OPTAB_LIB_WIDEN);
  if (tmp != destptr)
    emit_move_insn (destptr, tmp);
  if (srcptr)
    {
      tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
				 true, OPTAB_LIB_WIDEN);
      if (tmp != srcptr)
	emit_move_insn (srcptr, tmp);
    }
  emit_label (out_label);
}
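
/* Shape of the code emitted above, sketched as pseudo C for a copy with
   MODE == SImode and UNROLL == 2 (an 8-byte chunk per iteration):

       size = count & ~7;
       iter = 0;                  // the size == 0 guard is only emitted
     top:                         // for 1-byte chunks
       tmp0 = src[iter]; tmp1 = src[iter + 4];
       dest[iter] = tmp0; dest[iter + 4] = tmp1;
       iter += 8;
       if (iter < size) goto top;
     out:
       destptr += iter; srcptr += iter;  */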
/* Output "rep; mov" instruction.
   Arguments have same meaning as for previous function.  */
static void
expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
			   rtx destptr, rtx srcptr,
			   rtx count,
			   enum machine_mode mode)
{
  rtx destexp;
  rtx srcexp;
  rtx countreg;
  HOST_WIDE_INT rounded_count;

  /* If the size is known to be divisible by 4, it is shorter to use
     a 4-byte wide "rep movs".  */
  if (mode == QImode && CONST_INT_P (count)
      && !(INTVAL (count) & 3))
    mode = SImode;

  if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
    destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
  if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
    srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
  countreg = ix86_zero_extend_to_Pmode (scale_counter (count,
						       GET_MODE_SIZE (mode)));
  if (mode != QImode)
    {
      destexp = gen_rtx_ASHIFT (Pmode, countreg,
				GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
      srcexp = gen_rtx_ASHIFT (Pmode, countreg,
			       GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
    }
  else
    {
      destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
      srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
    }
  if (CONST_INT_P (count))
    {
      rounded_count = (INTVAL (count)
		       & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
      destmem = shallow_copy_rtx (destmem);
      srcmem = shallow_copy_rtx (srcmem);
      set_mem_size (destmem, rounded_count);
      set_mem_size (srcmem, rounded_count);
    }
  else
    {
      if (MEM_SIZE_KNOWN_P (destmem))
	clear_mem_size (destmem);
      if (MEM_SIZE_KNOWN_P (srcmem))
	clear_mem_size (srcmem);
    }
  emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
			  destexp, srcexp));
}
/* Output "rep; stos" instruction.
   Arguments have same meaning as for previous function.  */
static void
expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
			    rtx count, enum machine_mode mode,
			    rtx orig_value)
{
  rtx destexp;
  rtx countreg;
  HOST_WIDE_INT rounded_count;

  if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
    destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
  value = force_reg (mode, gen_lowpart (mode, value));
  countreg = ix86_zero_extend_to_Pmode (scale_counter (count,
						       GET_MODE_SIZE (mode)));
  if (mode != QImode)
    {
      destexp = gen_rtx_ASHIFT (Pmode, countreg,
				GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
    }
  else
    destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
  if (orig_value == const0_rtx && CONST_INT_P (count))
    {
      rounded_count = (INTVAL (count)
		       & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
      destmem = shallow_copy_rtx (destmem);
      set_mem_size (destmem, rounded_count);
    }
  else if (MEM_SIZE_KNOWN_P (destmem))
    clear_mem_size (destmem);
  emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
}
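
/* Both rep expanders above reduce to one string instruction.  Roughly, a
   4-byte wide clear of N bytes (N divisible by 4) becomes:

       mov  $N/4, %ecx        # scale_counter
       xor  %eax, %eax        # the promoted VALUE
       rep  stosl             # %edi advances by 4 * %ecx

   DESTEXP (and SRCEXP for copies) spell out the final pointer value so
   the RTL makes the auto-increment visible to the optimizers.  */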
static void
emit_strmov (rtx destmem, rtx srcmem,
	     rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
{
  rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
  rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
  emit_insn (gen_strmov (destptr, dest, srcptr, src));
}
/* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST.  */
static void
expand_movmem_epilogue (rtx destmem, rtx srcmem,
			rtx destptr, rtx srcptr, rtx count, int max_size)
{
  rtx src, dest;
  if (CONST_INT_P (count))
    {
      HOST_WIDE_INT countval = INTVAL (count);
      int offset = 0;

      if ((countval & 0x10) && max_size > 16)
	{
	  if (TARGET_64BIT)
	    {
	      emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
	      emit_strmov (destmem, srcmem, destptr, srcptr, DImode,
			   offset + 8);
	    }
	  else
	    gcc_unreachable ();
	  offset += 16;
	}
      if ((countval & 0x08) && max_size > 8)
	{
	  if (TARGET_64BIT)
	    emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
	  else
	    {
	      emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
	      emit_strmov (destmem, srcmem, destptr, srcptr, SImode,
			   offset + 4);
	    }
	  offset += 8;
	}
      if ((countval & 0x04) && max_size > 4)
	{
	  emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
	  offset += 4;
	}
      if ((countval & 0x02) && max_size > 2)
	{
	  emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
	  offset += 2;
	}
      if ((countval & 0x01) && max_size > 1)
	{
	  emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
	  offset += 1;
	}
      return;
    }
  if (max_size > 8)
    {
      count = expand_simple_binop (GET_MODE (count), AND, count,
				   GEN_INT (max_size - 1),
				   count, 1, OPTAB_DIRECT);
      expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
				     count, QImode, 1, 4);
      return;
    }

  /* When there are stringops, we can cheaply increase dest and src pointers.
     Otherwise we save code size by maintaining offset (zero is readily
     available from preceding rep operation) and using x86 addressing modes.
   */
  if (TARGET_SINGLE_STRINGOP)
    {
      if (max_size > 4)
	{
	  rtx label = ix86_expand_aligntest (count, 4, true);
	  src = change_address (srcmem, SImode, srcptr);
	  dest = change_address (destmem, SImode, destptr);
	  emit_insn (gen_strmov (destptr, dest, srcptr, src));
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 2)
	{
	  rtx label = ix86_expand_aligntest (count, 2, true);
	  src = change_address (srcmem, HImode, srcptr);
	  dest = change_address (destmem, HImode, destptr);
	  emit_insn (gen_strmov (destptr, dest, srcptr, src));
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 1)
	{
	  rtx label = ix86_expand_aligntest (count, 1, true);
	  src = change_address (srcmem, QImode, srcptr);
	  dest = change_address (destmem, QImode, destptr);
	  emit_insn (gen_strmov (destptr, dest, srcptr, src));
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
    }
  else
    {
      rtx offset = force_reg (Pmode, const0_rtx);
      rtx tmp;

      if (max_size > 4)
	{
	  rtx label = ix86_expand_aligntest (count, 4, true);
	  src = change_address (srcmem, SImode, srcptr);
	  dest = change_address (destmem, SImode, destptr);
	  emit_move_insn (dest, src);
	  tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
				     true, OPTAB_LIB_WIDEN);
	  if (tmp != offset)
	    emit_move_insn (offset, tmp);
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 2)
	{
	  rtx label = ix86_expand_aligntest (count, 2, true);
	  tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
	  src = change_address (srcmem, HImode, tmp);
	  tmp = gen_rtx_PLUS (Pmode, destptr, offset);
	  dest = change_address (destmem, HImode, tmp);
	  emit_move_insn (dest, src);
	  tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
				     true, OPTAB_LIB_WIDEN);
	  if (tmp != offset)
	    emit_move_insn (offset, tmp);
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
      if (max_size > 1)
	{
	  rtx label = ix86_expand_aligntest (count, 1, true);
	  tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
	  src = change_address (srcmem, QImode, tmp);
	  tmp = gen_rtx_PLUS (Pmode, destptr, offset);
	  dest = change_address (destmem, QImode, tmp);
	  emit_move_insn (dest, src);
	  emit_label (label);
	  LABEL_NUSES (label) = 1;
	}
    }
}
/* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
static void
expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
				 rtx count, int max_size)
{
  count =
    expand_simple_binop (counter_mode (count), AND, count,
			 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
  expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
				 gen_lowpart (QImode, value), count, QImode,
				 1, max_size / 2);
}
/* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
static void
expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count,
			int max_size)
{
  rtx dest;

  if (CONST_INT_P (count))
    {
      HOST_WIDE_INT countval = INTVAL (count);
      int offset = 0;

      if ((countval & 0x10) && max_size > 16)
	{
	  if (TARGET_64BIT)
	    {
	      dest = adjust_automodify_address_nv (destmem, DImode, destptr,
						   offset);
	      emit_insn (gen_strset (destptr, dest, value));
	      dest = adjust_automodify_address_nv (destmem, DImode, destptr,
						   offset + 8);
	      emit_insn (gen_strset (destptr, dest, value));
	    }
	  else
	    gcc_unreachable ();
	  offset += 16;
	}
      if ((countval & 0x08) && max_size > 8)
	{
	  if (TARGET_64BIT)
	    {
	      dest = adjust_automodify_address_nv (destmem, DImode, destptr,
						   offset);
	      emit_insn (gen_strset (destptr, dest, value));
	    }
	  else
	    {
	      dest = adjust_automodify_address_nv (destmem, SImode, destptr,
						   offset);
	      emit_insn (gen_strset (destptr, dest, value));
	      dest = adjust_automodify_address_nv (destmem, SImode, destptr,
						   offset + 4);
	      emit_insn (gen_strset (destptr, dest, value));
	    }
	  offset += 8;
	}
      if ((countval & 0x04) && max_size > 4)
	{
	  dest = adjust_automodify_address_nv (destmem, SImode, destptr,
					       offset);
	  emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
	  offset += 4;
	}
      if ((countval & 0x02) && max_size > 2)
	{
	  dest = adjust_automodify_address_nv (destmem, HImode, destptr,
					       offset);
	  emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
	  offset += 2;
	}
      if ((countval & 0x01) && max_size > 1)
	{
	  dest = adjust_automodify_address_nv (destmem, QImode, destptr,
					       offset);
	  emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
	  offset += 1;
	}
      return;
    }
  if (max_size > 32)
    {
      expand_setmem_epilogue_via_loop (destmem, destptr, value, count,
				       max_size);
      return;
    }
  if (max_size > 16)
    {
      rtx label = ix86_expand_aligntest (count, 16, true);
      if (TARGET_64BIT)
	{
	  dest = change_address (destmem, DImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	}
      else
	{
	  dest = change_address (destmem, SImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 8)
    {
      rtx label = ix86_expand_aligntest (count, 8, true);
      if (TARGET_64BIT)
	{
	  dest = change_address (destmem, DImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	}
      else
	{
	  dest = change_address (destmem, SImode, destptr);
	  emit_insn (gen_strset (destptr, dest, value));
	  emit_insn (gen_strset (destptr, dest, value));
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 4)
    {
      rtx label = ix86_expand_aligntest (count, 4, true);
      dest = change_address (destmem, SImode, destptr);
      emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 2)
    {
      rtx label = ix86_expand_aligntest (count, 2, true);
      dest = change_address (destmem, HImode, destptr);
      emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (max_size > 1)
    {
      rtx label = ix86_expand_aligntest (count, 1, true);
      dest = change_address (destmem, QImode, destptr);
      emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
}
/* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
   to DESIRED_ALIGNMENT.  */
static void
expand_movmem_prologue (rtx destmem, rtx srcmem,
			rtx destptr, rtx srcptr, rtx count,
			int align, int desired_alignment)
{
  if (align <= 1 && desired_alignment > 1)
    {
      rtx label = ix86_expand_aligntest (destptr, 1, false);
      srcmem = change_address (srcmem, QImode, srcptr);
      destmem = change_address (destmem, QImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 1);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 2 && desired_alignment > 2)
    {
      rtx label = ix86_expand_aligntest (destptr, 2, false);
      srcmem = change_address (srcmem, HImode, srcptr);
      destmem = change_address (destmem, HImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 2);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 4 && desired_alignment > 4)
    {
      rtx label = ix86_expand_aligntest (destptr, 4, false);
      srcmem = change_address (srcmem, SImode, srcptr);
      destmem = change_address (destmem, SImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 4);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  gcc_assert (desired_alignment <= 8);
}
/* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
   ALIGN_BYTES is how many bytes need to be copied.  */
static rtx
expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
				 int desired_align, int align_bytes)
{
  rtx src = *srcp;
  rtx orig_dst = dst;
  rtx orig_src = src;
  int off = 0;
  int src_align_bytes = get_mem_align_offset (src,
					      desired_align * BITS_PER_UNIT);
  if (src_align_bytes >= 0)
    src_align_bytes = desired_align - src_align_bytes;
  if (align_bytes & 1)
    {
      dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
      src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
      off = 1;
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
    }
  if (align_bytes & 2)
    {
      dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
      src = adjust_automodify_address_nv (src, HImode, srcreg, off);
      if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
	set_mem_align (dst, 2 * BITS_PER_UNIT);
      if (src_align_bytes >= 0
	  && (src_align_bytes & 1) == (align_bytes & 1)
	  && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
	set_mem_align (src, 2 * BITS_PER_UNIT);
      off = 2;
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
    }
  if (align_bytes & 4)
    {
      dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
      src = adjust_automodify_address_nv (src, SImode, srcreg, off);
      if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
	set_mem_align (dst, 4 * BITS_PER_UNIT);
      if (src_align_bytes >= 0)
	{
	  unsigned int src_align = 0;
	  if ((src_align_bytes & 3) == (align_bytes & 3))
	    src_align = 4;
	  else if ((src_align_bytes & 1) == (align_bytes & 1))
	    src_align = 2;
	  if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
	    set_mem_align (src, src_align * BITS_PER_UNIT);
	}
      off = 4;
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
    }
  dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
  src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
  if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
    set_mem_align (dst, desired_align * BITS_PER_UNIT);
  if (src_align_bytes >= 0)
    {
      unsigned int src_align = 0;
      if ((src_align_bytes & 7) == (align_bytes & 7))
	src_align = 8;
      else if ((src_align_bytes & 3) == (align_bytes & 3))
	src_align = 4;
      else if ((src_align_bytes & 1) == (align_bytes & 1))
	src_align = 2;
      if (src_align > (unsigned int) desired_align)
	src_align = desired_align;
      if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
	set_mem_align (src, src_align * BITS_PER_UNIT);
    }
  if (MEM_SIZE_KNOWN_P (orig_dst))
    set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
  if (MEM_SIZE_KNOWN_P (orig_src))
    set_mem_size (src, MEM_SIZE (orig_src) - align_bytes);
  *srcp = src;
  return dst;
}
/* Store enough into DEST to align DEST, known to be aligned by ALIGN,
   to DESIRED_ALIGNMENT.  */
static void
expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
			int align, int desired_alignment)
{
  if (align <= 1 && desired_alignment > 1)
    {
      rtx label = ix86_expand_aligntest (destptr, 1, false);
      destmem = change_address (destmem, QImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
      ix86_adjust_counter (count, 1);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 2 && desired_alignment > 2)
    {
      rtx label = ix86_expand_aligntest (destptr, 2, false);
      destmem = change_address (destmem, HImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
      ix86_adjust_counter (count, 2);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  if (align <= 4 && desired_alignment > 4)
    {
      rtx label = ix86_expand_aligntest (destptr, 4, false);
      destmem = change_address (destmem, SImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
      ix86_adjust_counter (count, 4);
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
  gcc_assert (desired_alignment <= 8);
}
/* Store enough into DST to align DST to DESIRED_ALIGN.  ALIGN_BYTES is how
   many bytes need to be stored.  */
static rtx
expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
				 int desired_align, int align_bytes)
{
  int off = 0;
  rtx orig_dst = dst;
  if (align_bytes & 1)
    {
      dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
      off = 1;
      emit_insn (gen_strset (destreg, dst,
			     gen_lowpart (QImode, value)));
    }
  if (align_bytes & 2)
    {
      dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
      if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
	set_mem_align (dst, 2 * BITS_PER_UNIT);
      off = 2;
      emit_insn (gen_strset (destreg, dst,
			     gen_lowpart (HImode, value)));
    }
  if (align_bytes & 4)
    {
      dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
      if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
	set_mem_align (dst, 4 * BITS_PER_UNIT);
      off = 4;
      emit_insn (gen_strset (destreg, dst,
			     gen_lowpart (SImode, value)));
    }
  dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
  if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
    set_mem_align (dst, desired_align * BITS_PER_UNIT);
  if (MEM_SIZE_KNOWN_P (orig_dst))
    set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
  return dst;
}
/* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation.  */
static enum stringop_alg
decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
	    int *dynamic_check, bool *noalign)
{
  const struct stringop_algs * algs;
  bool optimize_for_speed;
  /* Algorithms using the rep prefix want at least edi and ecx;
     additionally, memset wants eax and memcpy wants esi.  Don't
     consider such algorithms if the user has appropriated those
     registers for their own purposes.  */
  bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
			     || (memset
				 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
  *noalign = false;

#define ALG_USABLE_P(alg) (rep_prefix_usable			\
			   || (alg != rep_prefix_1_byte		\
			       && alg != rep_prefix_4_byte	\
			       && alg != rep_prefix_8_byte))
  const struct processor_costs *cost;

  /* Even if the string operation call is cold, we still might spend a lot
     of time processing large blocks.  */
  if (optimize_function_for_size_p (cfun)
      || (optimize_insn_for_size_p ()
	  && expected_size != -1 && expected_size < 256))
    optimize_for_speed = false;
  else
    optimize_for_speed = true;

  cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;

  *dynamic_check = -1;
  if (memset)
    algs = &cost->memset[TARGET_64BIT != 0];
  else
    algs = &cost->memcpy[TARGET_64BIT != 0];
  if (ix86_stringop_alg != no_stringop && ALG_USABLE_P (ix86_stringop_alg))
    return ix86_stringop_alg;
  /* rep; movq or rep; movl is the smallest variant.  */
  else if (!optimize_for_speed)
    {
      if (!count || (count & 3))
	return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
      else
	return rep_prefix_usable ? rep_prefix_4_byte : loop;
    }
  /* Very tiny blocks are best handled via the loop; REP is expensive to
     set up.  */
  else if (expected_size != -1 && expected_size < 4)
    return loop_1_byte;
  else if (expected_size != -1)
    {
      unsigned int i;
      enum stringop_alg alg = libcall;
      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
	{
	  /* We get here if the algorithms that were not libcall-based
	     were rep-prefix based and we are unable to use rep prefixes
	     based on global register usage.  Break out of the loop and
	     use the heuristic below.  */
	  if (algs->size[i].max == 0)
	    break;
	  if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
	    {
	      enum stringop_alg candidate = algs->size[i].alg;

	      if (candidate != libcall && ALG_USABLE_P (candidate))
		alg = candidate;
	      /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
		 last non-libcall inline algorithm.  */
	      if (TARGET_INLINE_ALL_STRINGOPS)
		{
		  /* When the current size is best to be copied by a libcall,
		     but we are still forced to inline, run the heuristic below
		     that will pick code for medium sized blocks.  */
		  if (alg != libcall)
		    return alg;
		  break;
		}
	      else if (ALG_USABLE_P (candidate))
		{
		  *noalign = algs->size[i].noalign;
		  return candidate;
		}
	    }
	}
      gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
    }
  /* When asked to inline the call anyway, try to pick meaningful choice.
     We look for maximal size of block that is faster to copy by hand and
     take blocks of at most of that size guessing that average size will
     be roughly half of the block.

     If this turns out to be bad, we might simply specify the preferred
     choice in ix86_costs.  */
  if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
      && (algs->unknown_size == libcall
	  || !ALG_USABLE_P (algs->unknown_size)))
    {
      int max = -1;
      enum stringop_alg alg;
      int i;
      bool any_alg_usable_p = true;

      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
	{
	  enum stringop_alg candidate = algs->size[i].alg;
	  any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);

	  if (candidate != libcall && candidate
	      && ALG_USABLE_P (candidate))
	    max = algs->size[i].max;
	}
      /* If there aren't any usable algorithms, then recursing on
	 smaller sizes isn't going to find anything.  Just return the
	 simple byte-at-a-time copy loop.  */
      if (!any_alg_usable_p)
	{
	  /* Pick something reasonable.  */
	  if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
	    *dynamic_check = 128;
	  return loop_1_byte;
	}
      if (max == -1)
	max = 4096;
      alg = decide_alg (count, max / 2, memset, dynamic_check, noalign);
      gcc_assert (*dynamic_check == -1);
      gcc_assert (alg != libcall);
      if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
	*dynamic_check = max;
      return alg;
    }
  return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
#undef ALG_USABLE_P
}
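
/* Worked example of the forced-inline heuristic above, for a hypothetical
   cost table that handles blocks up to 128 bytes with "loop" and larger
   ones via libcall: max is 128, the recursion runs with expected_size
   128 / 2 = 64 and picks "loop", and with -minline-stringops-dynamically
   *dynamic_check becomes 128 so larger blocks still take the library
   call at run time.  */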
/* Decide on alignment.  We know that the operand is already aligned to ALIGN
   (ALIGN can be based on profile feedback and thus it is not 100% guaranteed).  */
static int
decide_alignment (int align,
		  enum stringop_alg alg,
		  int expected_size)
{
  int desired_align = 0;
  switch (alg)
    {
      case no_stringop:
	gcc_unreachable ();
      case loop:
      case unrolled_loop:
	desired_align = GET_MODE_SIZE (Pmode);
	break;
      case rep_prefix_8_byte:
	desired_align = 8;
	break;
      case rep_prefix_4_byte:
	/* PentiumPro has special logic triggering for 8 byte aligned blocks,
	   copying whole cache line at once.  */
	if (TARGET_PENTIUMPRO)
	  desired_align = 8;
	else
	  desired_align = 4;
	break;
      case rep_prefix_1_byte:
	/* PentiumPro has special logic triggering for 8 byte aligned blocks,
	   copying whole cache line at once.  */
	if (TARGET_PENTIUMPRO)
	  desired_align = 8;
	else
	  desired_align = 1;
	break;
      case loop_1_byte:
	desired_align = 1;
	break;
      case libcall:
	return 0;
    }

  if (optimize_size)
    desired_align = 1;
  if (desired_align < align)
    desired_align = align;
  if (expected_size != -1 && expected_size < 4)
    desired_align = align;
  return desired_align;
}
/* Return the smallest power of 2 greater than VAL.  */
static int
smallest_pow2_greater_than (int val)
{
  int ret = 1;
  while (ret <= val)
    ret <<= 1;
  return ret;
}
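
/* The result is strictly greater than VAL: smallest_pow2_greater_than (4)
   is 8, and smallest_pow2_greater_than (0) is 1.  */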
/* Expand string move (memcpy) operation.  Use i386 string operations
   when profitable.  expand_setmem contains similar code.  The code
   depends upon architecture, block size and alignment, but always has
   the same overall structure:

   1) Prologue guard: Conditional that jumps up to epilogues for small
      blocks that can be handled by epilogue alone.  This is faster
      but also needed for correctness, since the prologue assumes the
      block is larger than the desired alignment.

      An optional dynamic check for size and libcall for large
      blocks is emitted here too, with -minline-stringops-dynamically.

   2) Prologue: copy first few bytes in order to get destination
      aligned to DESIRED_ALIGN.  It is emitted only when ALIGN is less
      than DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be
      copied.  We emit either a jump tree on power of two sized
      blocks, or a byte loop.

   3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
      with specified algorithm.

   4) Epilogue: code copying tail of the block that is too small to be
      handled by main body (or up to size guarded by prologue guard).  */
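
/* As an illustrative sketch only: for memcpy (dst, src, n) with run-time N,
   the unrolled loop algorithm on 64-bit (size_needed = 32, desired_align
   = 8) emits roughly

       if (n < 32) goto epilogue;                   // 1) guard
       copy 1/2/4 byte pieces until dst % 8 == 0;   // 2) prologue
       copy 32 bytes per iteration while they fit;  // 3) main loop
     epilogue:
       copy the n & 31 remaining bytes;             // 4) epilogue

   The actual constants depend on the algorithm and cost tables chosen
   in step 0 below.  */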
bool
ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
		    rtx expected_align_exp, rtx expected_size_exp)
{
  rtx destreg;
  rtx srcreg;
  rtx label = NULL;
  rtx tmp;
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0, align_bytes = 0;
  enum stringop_alg alg;
  int dynamic_check;
  bool need_zero_guard = false;
  bool noalign;

  if (CONST_INT_P (align_exp))
    align = INTVAL (align_exp);
  /* i386 can do misaligned access on reasonably increased cost.  */
  if (CONST_INT_P (expected_align_exp)
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  /* ALIGN is the minimum of destination and source alignment, but we care here
     just about destination alignment.  */
  else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
    align = MEM_ALIGN (dst) / BITS_PER_UNIT;

  if (CONST_INT_P (count_exp))
    count = expected_size = INTVAL (count_exp);
  if (CONST_INT_P (expected_size_exp) && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Make sure we don't need to care about overflow later on.  */
  if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
    return false;

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, false, &dynamic_check, &noalign);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS || noalign)
    align = desired_align;

  if (alg == libcall)
    return false;
  gcc_assert (alg != no_stringop);
  if (!count)
    count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
  destreg = copy_addr_to_reg (XEXP (dst, 0));
  srcreg = copy_addr_to_reg (XEXP (src, 0));
  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode);
      break;
    case unrolled_loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode) * (TARGET_64BIT ? 4 : 2);
      break;
    case rep_prefix_8_byte:
      size_needed = 8;
      break;
    case rep_prefix_4_byte:
      size_needed = 4;
      break;
    case rep_prefix_1_byte:
      size_needed = 1;
      break;
    case loop_1_byte:
      need_zero_guard = true;
      size_needed = 1;
      break;
    }

  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (CONST_INT_P (count_exp) && desired_align > align)
    {
      if (INTVAL (count_exp) > desired_align
	  && INTVAL (count_exp) > size_needed)
	{
	  align_bytes
	    = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
	  if (align_bytes <= 0)
	    align_bytes = 0;
	  else
	    align_bytes = desired_align - align_bytes;
	}
      if (align_bytes == 0)
	count_exp = force_reg (counter_mode (count_exp), count_exp);
    }
  gcc_assert (desired_align >= 1 && align >= 1);

  /* Ensure that alignment prologue won't copy past end of block.  */
  if (size_needed > 1 || (desired_align > 1 && desired_align > align))
    {
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
      /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
	 Make sure it is power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);

      if (count)
	{
	  if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
	    {
	      /* If main algorithm works on QImode, no epilogue is needed.
		 For small sizes just don't align anything.  */
	      if (size_needed == 1)
		desired_align = align;
	      else
		goto epilogue;
	    }
	}
      else
	{
	  label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (epilogue_size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1 || expected_size < epilogue_size_needed)
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	}
    }

  /* Emit code to decide on runtime whether library call or inline should be
     used.  */
  if (dynamic_check != -1)
    {
      if (CONST_INT_P (count_exp))
	{
	  if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
	    {
	      emit_block_move_via_libcall (dst, src, count_exp, false);
	      count_exp = const0_rtx;
	      goto epilogue;
	    }
	}
      else
	{
	  rtx hot_label = gen_label_rtx ();
	  jump_around_label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
				   LEU, 0, GET_MODE (count_exp), 1, hot_label);
	  predict_jump (REG_BR_PROB_BASE * 90 / 100);
	  emit_block_move_via_libcall (dst, src, count_exp, false);
	  emit_jump (jump_around_label);
	  emit_label (hot_label);
	}
    }

  /* Step 2: Alignment prologue.  */

  if (desired_align > align)
    {
      if (align_bytes == 0)
	{
	  /* Except for the first move in epilogue, we no longer know
	     constant offset in aliasing info.  It doesn't seem worth
	     the pain to maintain it for the first move, so throw away
	     the info early.  */
	  src = change_address (src, BLKmode, srcreg);
	  dst = change_address (dst, BLKmode, destreg);
	  expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
				  desired_align);
	}
      else
	{
	  /* If we know how many bytes need to be stored before dst is
	     sufficiently aligned, maintain aliasing info accurately.  */
	  dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
						 desired_align, align_bytes);
	  count_exp = plus_constant (counter_mode (count_exp),
				     count_exp, -align_bytes);
	  count -= align_bytes;
	}
      if (need_zero_guard
	  && (count < (unsigned HOST_WIDE_INT) size_needed
	      || (align_bytes == 0
		  && count < ((unsigned HOST_WIDE_INT) size_needed
			      + desired_align - align))))
	{
	  /* It is possible that we copied enough so the main loop will not
	     execute.  */
	  gcc_assert (size_needed > 1);
	  if (label == NULL_RTX)
	    label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1
	      || expected_size < (desired_align - align) / 2 + size_needed)
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	}
    }
  if (label && size_needed == 1)
    {
      emit_label (label);
      LABEL_NUSES (label) = 1;
      label = NULL;
      epilogue_size_needed = 1;
    }
  else if (label == NULL_RTX)
    epilogue_size_needed = size_needed;

  /* Step 3: Main loop.  */

  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop_1_byte:
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, QImode, 1, expected_size);
      break;
    case loop:
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, word_mode, 1, expected_size);
      break;
    case unrolled_loop:
      /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
	 registers for 4 temporaries anyway.  */
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, word_mode,
				     TARGET_64BIT ? 4 : 2,
				     expected_size);
      break;
    case rep_prefix_8_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
				 DImode);
      break;
    case rep_prefix_4_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
				 SImode);
      break;
    case rep_prefix_1_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
				 QImode);
      break;
    }
  /* Properly adjust the offset of src and dest memory for aliasing.  */
  if (CONST_INT_P (count_exp))
    {
      src = adjust_automodify_address_nv (src, BLKmode, srcreg,
					  (count / size_needed) * size_needed);
      dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
					  (count / size_needed) * size_needed);
    }
  else
    {
      src = change_address (src, BLKmode, srcreg);
      dst = change_address (dst, BLKmode, destreg);
    }

  /* Step 4: Epilogue to copy the remaining bytes.  */
 epilogue:
  if (label)
    {
      /* When the main loop is done, COUNT_EXP might hold original count,
	 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
	 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED
	 - 1) bytes.  Compensate if needed.  */

      if (size_needed < epilogue_size_needed)
	{
	  tmp =
	    expand_simple_binop (counter_mode (count_exp), AND, count_exp,
				 GEN_INT (size_needed - 1), count_exp, 1,
				 OPTAB_DIRECT);
	  if (tmp != count_exp)
	    emit_move_insn (count_exp, tmp);
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }

  if (count_exp != const0_rtx && epilogue_size_needed > 1)
    expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
			    epilogue_size_needed);
  if (jump_around_label)
    emit_label (jump_around_label);
  return true;
}
/* Helper function for memcpy.  For QImode value 0xXY produce
   0xXYXYXYXY of the width specified by MODE.  This is essentially
   a * 0x01010101, but we can do slightly better than
   synth_mult by unwinding the sequence by hand on CPUs with
   slow multiply.  */
static rtx
promote_duplicated_reg (enum machine_mode mode, rtx val)
{
  enum machine_mode valmode = GET_MODE (val);
  rtx tmp;
  int nops = mode == DImode ? 3 : 2;

  gcc_assert (mode == SImode || mode == DImode);
  if (val == const0_rtx)
    return copy_to_mode_reg (mode, const0_rtx);
  if (CONST_INT_P (val))
    {
      HOST_WIDE_INT v = INTVAL (val) & 255;

      v |= v << 8;
      v |= v << 16;
      if (mode == DImode)
	v |= (v << 16) << 16;
      return copy_to_mode_reg (mode, gen_int_mode (v, mode));
    }

  if (valmode == VOIDmode)
    valmode = QImode;
  if (valmode != QImode)
    val = gen_lowpart (QImode, val);
  if (mode == QImode)
    return val;
  if (!TARGET_PARTIAL_REG_STALL)
    nops--;
  if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
      + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
      <= (ix86_cost->shift_const + ix86_cost->add) * nops
	 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
    {
      rtx reg = convert_modes (mode, QImode, val, true);
      tmp = promote_duplicated_reg (mode, const1_rtx);
      return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
				  OPTAB_DIRECT);
    }
  else
    {
      rtx reg = convert_modes (mode, QImode, val, true);

      if (!TARGET_PARTIAL_REG_STALL)
	{
	  if (mode == SImode)
	    emit_insn (gen_movsi_insv_1 (reg, reg));
	  else
	    emit_insn (gen_movdi_insv_1 (reg, reg));
	}
      else
	{
	  tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
				     NULL, 1, OPTAB_DIRECT);
	  reg =
	    expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
	}
      tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
				 NULL, 1, OPTAB_DIRECT);
      reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      if (mode == SImode)
	return reg;
      tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
				 NULL, 1, OPTAB_DIRECT);
      reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      return reg;
    }
}
/* Duplicate value VAL using promote_duplicated_reg into maximal size that will
   be needed by main loop copying SIZE_NEEDED chunks and prologue getting
   alignment from ALIGN to DESIRED_ALIGN.  */
static rtx
promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align,
				int align)
{
  rtx promoted_val;

  if (TARGET_64BIT
      && (size_needed > 4 || (desired_align > align && desired_align > 4)))
    promoted_val = promote_duplicated_reg (DImode, val);
  else if (size_needed > 2 || (desired_align > align && desired_align > 2))
    promoted_val = promote_duplicated_reg (SImode, val);
  else if (size_needed > 1 || (desired_align > align && desired_align > 1))
    promoted_val = promote_duplicated_reg (HImode, val);
  else
    promoted_val = val;

  return promoted_val;
}
/* Expand string clear operation (bzero).  Use i386 string operations when
   profitable.  See expand_movmem comment for explanation of individual
   steps performed.  */
bool
ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
		    rtx expected_align_exp, rtx expected_size_exp)
{
  rtx destreg;
  rtx label = NULL;
  rtx tmp;
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0, align_bytes = 0;
  enum stringop_alg alg;
  rtx promoted_val = NULL;
  bool force_loopy_epilogue = false;
  int dynamic_check;
  bool need_zero_guard = false;
  bool noalign;

  if (CONST_INT_P (align_exp))
    align = INTVAL (align_exp);
  /* i386 can do misaligned access on reasonably increased cost.  */
  if (CONST_INT_P (expected_align_exp)
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  if (CONST_INT_P (count_exp))
    count = expected_size = INTVAL (count_exp);
  if (CONST_INT_P (expected_size_exp) && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Make sure we don't need to care about overflow later on.  */
  if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
    return false;

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, true, &dynamic_check, &noalign);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS || noalign)
    align = desired_align;

  if (alg == libcall)
    return false;
  gcc_assert (alg != no_stringop);
  if (!count)
    count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
  destreg = copy_addr_to_reg (XEXP (dst, 0));
  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode);
      break;
    case unrolled_loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (word_mode) * 4;
      break;
    case rep_prefix_8_byte:
      size_needed = 8;
      break;
    case rep_prefix_4_byte:
      size_needed = 4;
      break;
    case rep_prefix_1_byte:
      size_needed = 1;
      break;
    case loop_1_byte:
      need_zero_guard = true;
      size_needed = 1;
      break;
    }
  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (CONST_INT_P (count_exp) && desired_align > align)
    {
      if (INTVAL (count_exp) > desired_align
	  && INTVAL (count_exp) > size_needed)
	{
	  align_bytes
	    = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
	  if (align_bytes <= 0)
	    align_bytes = 0;
	  else
	    align_bytes = desired_align - align_bytes;
	}
      if (align_bytes == 0)
	{
	  enum machine_mode mode = SImode;
	  if (TARGET_64BIT && (count & ~0xffffffff))
	    mode = DImode;
	  count_exp = force_reg (mode, count_exp);
	}
    }
  /* Do the cheap promotion to allow better CSE across the
     main loop and epilogue (i.e. one load of the big constant in
     front of all code).  */
  if (CONST_INT_P (val_exp))
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
						   desired_align, align);
  /* Ensure that alignment prologue won't copy past end of block.  */
  if (size_needed > 1 || (desired_align > 1 && desired_align > align))
    {
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
      /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
	 Make sure it is power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);

      /* To improve performance of small blocks, we jump around the VAL
	 promoting mode.  This means that if the promoted VAL is not constant,
	 we might not use it in the epilogue and have to use byte
	 loop variant.  */
      if (epilogue_size_needed > 2 && !promoted_val)
	force_loopy_epilogue = true;
      if (count)
	{
	  if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
	    {
	      /* If main algorithm works on QImode, no epilogue is needed.
		 For small sizes just don't align anything.  */
	      if (size_needed == 1)
		desired_align = align;
	      else
		goto epilogue;
	    }
	}
      else
	{
	  label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (epilogue_size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1 || expected_size <= epilogue_size_needed)
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	}
    }
  if (dynamic_check != -1)
    {
      rtx hot_label = gen_label_rtx ();
      jump_around_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
			       LEU, 0, counter_mode (count_exp), 1, hot_label);
      predict_jump (REG_BR_PROB_BASE * 90 / 100);
      set_storage_via_libcall (dst, count_exp, val_exp, false);
      emit_jump (jump_around_label);
      emit_label (hot_label);
    }

  /* Step 2: Alignment prologue.  */

  /* Do the expensive promotion once we branched off the small blocks.  */
  if (!promoted_val)
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
						   desired_align, align);
  gcc_assert (desired_align >= 1 && align >= 1);

  if (desired_align > align)
    {
      if (align_bytes == 0)
	{
	  /* Except for the first move in epilogue, we no longer know
	     constant offset in aliasing info.  It doesn't seem worth
	     the pain to maintain it for the first move, so throw away
	     the info early.  */
	  dst = change_address (dst, BLKmode, destreg);
	  expand_setmem_prologue (dst, destreg, promoted_val, count_exp,
				  align, desired_align);
	}
      else
	{
	  /* If we know how many bytes need to be stored before dst is
	     sufficiently aligned, maintain aliasing info accurately.  */
	  dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
						 desired_align, align_bytes);
	  count_exp = plus_constant (counter_mode (count_exp),
				     count_exp, -align_bytes);
	  count -= align_bytes;
	}
      if (need_zero_guard
	  && (count < (unsigned HOST_WIDE_INT) size_needed
	      || (align_bytes == 0
		  && count < ((unsigned HOST_WIDE_INT) size_needed
			      + desired_align - align))))
	{
	  /* It is possible that we copied enough so the main loop will not
	     execute.  */
	  gcc_assert (size_needed > 1);
	  if (label == NULL_RTX)
	    label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (count_exp,
				   GEN_INT (size_needed),
				   LTU, 0, counter_mode (count_exp), 1, label);
	  if (expected_size == -1
	      || expected_size < (desired_align - align) / 2 + size_needed)
	    predict_jump (REG_BR_PROB_BASE * 20 / 100);
	  else
	    predict_jump (REG_BR_PROB_BASE * 60 / 100);
	}
    }
  if (label && size_needed == 1)
    {
      emit_label (label);
      LABEL_NUSES (label) = 1;
      label = NULL;
      promoted_val = val_exp;
      epilogue_size_needed = 1;
    }
  else if (label == NULL_RTX)
    epilogue_size_needed = size_needed;

  /* Step 3: Main loop.  */

  switch (alg)
    {
    case libcall:
    case no_stringop:
      gcc_unreachable ();
    case loop_1_byte:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, QImode, 1, expected_size);
      break;
    case loop:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, word_mode, 1, expected_size);
      break;
    case unrolled_loop:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, word_mode, 4, expected_size);
      break;
    case rep_prefix_8_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
				  DImode, val_exp);
      break;
    case rep_prefix_4_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
				  SImode, val_exp);
      break;
    case rep_prefix_1_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
				  QImode, val_exp);
      break;
    }
  /* Properly adjust the offset of dest memory for aliasing.  */
  if (CONST_INT_P (count_exp))
    dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
					(count / size_needed) * size_needed);
  else
    dst = change_address (dst, BLKmode, destreg);

  /* Step 4: Epilogue to copy the remaining bytes.  */

  if (label)
    {
      /* When the main loop is done, COUNT_EXP might hold original count,
	 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
	 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED
	 - 1) bytes.  Compensate if needed.  */

      if (size_needed < epilogue_size_needed)
	{
	  tmp =
	    expand_simple_binop (counter_mode (count_exp), AND, count_exp,
				 GEN_INT (size_needed - 1), count_exp, 1,
				 OPTAB_DIRECT);
	  if (tmp != count_exp)
	    emit_move_insn (count_exp, tmp);
	}
      emit_label (label);
      LABEL_NUSES (label) = 1;
    }
 epilogue:
  if (count_exp != const0_rtx && epilogue_size_needed > 1)
    {
      if (force_loopy_epilogue)
	expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
					 epilogue_size_needed);
      else
	expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
				epilogue_size_needed);
    }
  if (jump_around_label)
    emit_label (jump_around_label);
  return true;
}
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

   out = result, initialized with the start address
   align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
	not aligned, otherwise undefined

   This is just the body.  It needs the initializations mentioned above and
   some address computing at the end.  These things are done in i386.md.  */

static void
ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
{
  int align;
  rtx tmp;
  rtx align_2_label = NULL_RTX;
  rtx align_3_label = NULL_RTX;
  rtx align_4_label = gen_label_rtx ();
  rtx end_0_label = gen_label_rtx ();
  rtx mem;
  rtx tmpreg = gen_reg_rtx (SImode);
  rtx scratch = gen_reg_rtx (SImode);
  rtx cmp;

  align = 0;
  if (CONST_INT_P (align_rtx))
    align = INTVAL (align_rtx);

  /* Loop to check 1..3 bytes for null to get an aligned pointer.  */

  /* Is there a known alignment and is it less than 4?  */
  if (align < 4)
    {
      rtx scratch1 = gen_reg_rtx (Pmode);
      emit_move_insn (scratch1, out);
      /* Is there a known alignment and is it not 2? */
      if (align != 2)
	{
	  align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
	  align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */

	  /* Leave just the 3 lower bits.  */
	  align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
				    NULL_RTX, 0, OPTAB_WIDEN);

	  emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
				   Pmode, 1, align_4_label);
	  emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
				   Pmode, 1, align_2_label);
	  emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
				   Pmode, 1, align_3_label);
	}
      else
	{
	  /* Since the alignment is 2, we have to check 2 or 0 bytes;
	     check whether we are aligned to a 4-byte boundary.  */

	  align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
				    NULL_RTX, 0, OPTAB_WIDEN);

	  emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
				   Pmode, 1, align_4_label);
	}

      mem = change_address (src, QImode, out);

      /* Now compare the bytes.  */

      /* Compare the first n unaligned bytes on a byte-per-byte basis.  */
      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
			       QImode, 1, end_0_label);

      /* Increment the address.  */
      emit_insn (ix86_gen_add3 (out, out, const1_rtx));

      /* Not needed with an alignment of 2 */
      if (align != 2)
	{
	  emit_label (align_2_label);

	  emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
				   end_0_label);

	  emit_insn (ix86_gen_add3 (out, out, const1_rtx));

	  emit_label (align_3_label);
	}

      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
			       end_0_label);

      emit_insn (ix86_gen_add3 (out, out, const1_rtx));
    }

  /* Generate loop to check 4 bytes at a time.  It is not a good idea to
     align this loop.  It gives only huge programs, but does not help to
     speed up.  */
  emit_label (align_4_label);

  mem = change_address (src, SImode, out);
  emit_move_insn (scratch, mem);
  emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));

  /* This formula yields a nonzero result iff one of the bytes is zero.
     This saves three branches inside loop and many cycles.  */

  emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
  emit_insn (gen_one_cmplsi2 (scratch, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg,
			 gen_int_mode (0x80808080, SImode)));
  emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
			   align_4_label);
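
  /* For illustration: if the load above fetched scratch = 0x12003456
     (byte 2 is zero), then

	tmpreg  = scratch + (-0x01010101)   = 0x10ff3355
	tmpreg &= ~scratch  (= 0xedffcba9)  = 0x00ff0301
	tmpreg &= 0x80808080                = 0x00800000

     which is nonzero precisely because a byte of SCRATCH was zero: only
     a zero byte both borrows on the subtraction and keeps its high bit
     set under the complement mask.  */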
  if (TARGET_CMOVE)
    {
      rtx reg = gen_reg_rtx (SImode);
      rtx reg2 = gen_reg_rtx (Pmode);
      emit_move_insn (reg, tmpreg);
      emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));

      /* If zero is not in the first two bytes, move two bytes forward.  */
      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
			      gen_rtx_IF_THEN_ELSE (SImode, tmp,
						    reg, tmpreg)));
      /* Emit lea manually to avoid clobbering of flags.  */
      emit_insn (gen_rtx_SET (SImode, reg2,
			      gen_rtx_PLUS (Pmode, out, const2_rtx)));

      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, out,
			      gen_rtx_IF_THEN_ELSE (Pmode, tmp,
						    reg2, out)));
    }
  else
    {
      rtx end_2_label = gen_label_rtx ();
      /* Is zero in the first two bytes? */

      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
				  gen_rtx_LABEL_REF (VOIDmode, end_2_label),
				  pc_rtx);
      tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      JUMP_LABEL (tmp) = end_2_label;

      /* Not in the first two.  Move two bytes forward.  */
      emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
      emit_insn (ix86_gen_add3 (out, out, const2_rtx));

      emit_label (end_2_label);
    }

  /* Avoid branch in fixing the byte.  */
  tmpreg = gen_lowpart (QImode, tmpreg);
  emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
  tmp = gen_rtx_REG (CCmode, FLAGS_REG);
  cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
  emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));

  emit_label (end_0_label);
}
/* Expand strlen.  */

bool
ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
{
  rtx addr, scratch1, scratch2, scratch3, scratch4;

  /* The generic case of strlen expander is long.  Avoid expanding it
     unless TARGET_INLINE_ALL_STRINGOPS.  */

  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !TARGET_INLINE_ALL_STRINGOPS
      && !optimize_insn_for_size_p ()
      && (!CONST_INT_P (align) || INTVAL (align) < 4))
    return false;

  addr = force_reg (Pmode, XEXP (src, 0));
  scratch1 = gen_reg_rtx (Pmode);

  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !optimize_insn_for_size_p ())
    {
      /* Well it seems that some optimizer does not combine a call like
	 foo(strlen(bar), strlen(bar));
	 when the move and the subtraction is done here.  It does calculate
	 the length just once when these instructions are done inside of
	 output_strlen_unroll().  But I think since &bar[strlen(bar)] is
	 often used and I use one fewer register for the lifetime of
	 output_strlen_unroll() this is better.  */

      emit_move_insn (out, addr);

      ix86_expand_strlensi_unroll_1 (out, src, align);

      /* strlensi_unroll_1 returns the address of the zero at the end of
	 the string, like memchr(), so compute the length by subtracting
	 the start address.  */
      emit_insn (ix86_gen_sub3 (out, out, addr));
    }
  else
    {
      rtx unspec;

      /* Can't use this if the user has appropriated eax, ecx, or edi.  */
      if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
	return false;

      scratch2 = gen_reg_rtx (Pmode);
      scratch3 = gen_reg_rtx (Pmode);
      scratch4 = force_reg (Pmode, constm1_rtx);

      emit_move_insn (scratch3, addr);
      eoschar = force_reg (QImode, eoschar);

      src = replace_equiv_address_nv (src, scratch3);

      /* If .md starts supporting :P, this can be done in .md.  */
      unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
						 scratch4), UNSPEC_SCAS);
      emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
      emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
      emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
    }
  return true;
}
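
/* For reference, the else branch above corresponds to the classic scasb
   idiom; a sketch of the 32-bit instruction sequence it produces is

       mov    $-1, %ecx         # scratch4 = constm1_rtx
       repnz  scasb             # scan for EOSCHAR, decrementing %ecx
       not    %ecx              # scratch2 = ~count
       lea    -1(%ecx), ...     # length = ~count - 1

   The exact instructions are chosen by the strlenqi_1 pattern in
   i386.md.  */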
/* For a given symbol (function), construct code to compute the address
   of its PLT entry in the large x86-64 PIC model.  */
static rtx
construct_plt_address (rtx symbol)
{
  rtx tmp, unspec;

  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
  gcc_assert (ix86_cmodel == CM_LARGE_PIC);
  gcc_assert (Pmode == DImode);

  tmp = gen_reg_rtx (Pmode);
  unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);

  emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
  emit_insn (ix86_gen_add3 (tmp, tmp, pic_offset_table_rtx));
  return tmp;
}
void
ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
		  rtx callarg2,
		  rtx pop, bool sibcall)
{
  /* We need to represent that SI and DI registers are clobbered
     by SYSV calls.  */
  static int clobbered_registers[] = {
	XMM6_REG, XMM7_REG, XMM8_REG,
	XMM9_REG, XMM10_REG, XMM11_REG,
	XMM12_REG, XMM13_REG, XMM14_REG,
	XMM15_REG, SI_REG, DI_REG
  };
  rtx vec[ARRAY_SIZE (clobbered_registers) + 3];
  rtx use = NULL, call;
  unsigned int vec_len;

  if (pop == const0_rtx)
    pop = NULL;
  gcc_assert (!TARGET_64BIT || !pop);

  if (TARGET_MACHO && !TARGET_64BIT)
    {
#if TARGET_MACHO
      if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
	fnaddr = machopic_indirect_call_target (fnaddr);
#endif
    }
  else
    {
      /* Static functions and indirect calls don't need the pic register.  */
      if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
	  && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
	  && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
	use_reg (&use, pic_offset_table_rtx);
    }

  if (TARGET_64BIT && INTVAL (callarg2) >= 0)
    {
      rtx al = gen_rtx_REG (QImode, AX_REG);
      emit_move_insn (al, callarg2);
      use_reg (&use, al);
    }

  if (ix86_cmodel == CM_LARGE_PIC
      && MEM_P (fnaddr)
      && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
      && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
    fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
  else if (sibcall
	   ? !sibcall_insn_operand (XEXP (fnaddr, 0), word_mode)
	   : !call_insn_operand (XEXP (fnaddr, 0), word_mode))
    {
      fnaddr = XEXP (fnaddr, 0);
      if (GET_MODE (fnaddr) != word_mode)
	fnaddr = convert_to_mode (word_mode, fnaddr, 1);
      fnaddr = gen_rtx_MEM (QImode, copy_to_mode_reg (word_mode, fnaddr));
    }

  vec_len = 0;
  call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
  if (retval)
    call = gen_rtx_SET (VOIDmode, retval, call);
  vec[vec_len++] = call;

  if (pop)
    {
      pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
      pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
      vec[vec_len++] = pop;
    }

  if (TARGET_64BIT_MS_ABI
      && (!callarg2 || INTVAL (callarg2) != -2))
    {
      unsigned i;

      vec[vec_len++] = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
				       UNSPEC_MS_TO_SYSV_CALL);

      for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
	vec[vec_len++]
	  = gen_rtx_CLOBBER (VOIDmode,
			     gen_rtx_REG (SSE_REGNO_P (clobbered_registers[i])
					  ? TImode : DImode,
					  clobbered_registers[i]));
    }

  if (vec_len > 1)
    call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (vec_len, vec));
  call = emit_call_insn (call);
  if (use)
    CALL_INSN_FUNCTION_USAGE (call) = use;
}
/* Output the assembly for a call instruction.  */

const char *
ix86_output_call_insn (rtx insn, rtx call_op)
{
  bool direct_p = constant_call_address_operand (call_op, VOIDmode);
  bool seh_nop_p = false;
  const char *xasm;

  if (SIBLING_CALL_P (insn))
    {
      if (direct_p)
	xasm = "jmp\t%P0";
      /* SEH epilogue detection requires the indirect branch case
	 to include REX.W.  */
      else if (TARGET_SEH)
	xasm = "rex.W jmp %A0";
      else
	xasm = "jmp\t%A0";

      output_asm_insn (xasm, &call_op);
      return "";
    }

  /* SEH unwinding can require an extra nop to be emitted in several
     circumstances.  Determine if we have one of those.  */
  if (TARGET_SEH)
    {
      rtx i;

      for (i = NEXT_INSN (insn); i; i = NEXT_INSN (i))
	{
	  /* If we get to another real insn, we don't need the nop.  */
	  if (INSN_P (i))
	    break;

	  /* If we get to the epilogue note, prevent a catch region from
	     being adjacent to the standard epilogue sequence.  If non-
	     call-exceptions, we'll have done this during epilogue emission. */
	  if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
	      && !flag_non_call_exceptions
	      && !can_throw_internal (insn))
	    {
	      seh_nop_p = true;
	      break;
	    }
	}

      /* If we didn't find a real insn following the call, prevent the
	 unwinder from looking into the next function.  */
      if (i == NULL)
	seh_nop_p = true;
    }

  if (direct_p)
    xasm = "call\t%P0";
  else
    xasm = "call\t%A0";

  output_asm_insn (xasm, &call_op);

  if (seh_nop_p)
    return "nop";

  return "";
}
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */

static struct machine_function *
ix86_init_machine_status (void)
{
  struct machine_function *f;

  f = ggc_alloc_cleared_machine_function ();
  f->use_fast_prologue_epilogue_nregs = -1;
  f->call_abi = ix86_abi;

  return f;
}
/* Return a MEM corresponding to a stack slot with mode MODE.
   Allocate a new slot if necessary.

   The RTL for a function can have several slots available: N is
   which slot to use.  */

rtx
assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
{
  struct stack_local_entry *s;

  gcc_assert (n < MAX_386_STACK_LOCALS);

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->mode == mode && s->n == n)
      return validize_mem (copy_rtx (s->rtl));

  s = ggc_alloc_stack_local_entry ();
  s->n = n;
  s->mode = mode;
  s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);

  s->next = ix86_stack_locals;
  ix86_stack_locals = s;
  return validize_mem (s->rtl);
}
static void
ix86_instantiate_decls (void)
{
  struct stack_local_entry *s;

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->rtl != NULL_RTX)
      instantiate_decl_rtl (s->rtl);
}
/* Calculate the length of the memory address in the instruction encoding.
   Includes addr32 prefix, does not include the one-byte modrm, opcode,
   or other prefixes.  We never generate addr32 prefix for LEA insn.  */

int
memory_address_length (rtx addr, bool lea)
{
  struct ix86_address parts;
  rtx base, index, disp;
  int len;
  int ok;

  if (GET_CODE (addr) == PRE_DEC
      || GET_CODE (addr) == POST_INC
      || GET_CODE (addr) == PRE_MODIFY
      || GET_CODE (addr) == POST_MODIFY)
    return 0;

  ok = ix86_decompose_address (addr, &parts);
  gcc_assert (ok);

  len = (parts.seg == SEG_DEFAULT) ? 0 : 1;

  /* If this is not an LEA instruction, add the length of the addr32
     prefix.  */
  if (TARGET_64BIT && !lea
      && (SImode_address_operand (addr, VOIDmode)
	  || (parts.base && GET_MODE (parts.base) == SImode)
	  || (parts.index && GET_MODE (parts.index) == SImode)))
    len++;

  base = parts.base;
  index = parts.index;
  disp = parts.disp;

  if (base && GET_CODE (base) == SUBREG)
    base = SUBREG_REG (base);
  if (index && GET_CODE (index) == SUBREG)
    index = SUBREG_REG (index);

  gcc_assert (base == NULL_RTX || REG_P (base));
  gcc_assert (index == NULL_RTX || REG_P (index));

  /* Rule of thumb:
       - esp as the base always wants an index,
       - ebp as the base always wants a displacement,
       - r12 as the base always wants an index,
       - r13 as the base always wants a displacement.  */
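
  /* For example, in 32-bit operand encodings:
       movl (%ebx), %eax    8b 03         base only, one-byte modrm
       movl (%esp), %eax    8b 04 24      esp forces a SIB byte
       movl (%ebp), %eax    8b 45 00      ebp forces a zero disp8
     The len adjustments below account for these extra bytes.  */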
  /* Register Indirect.  */
  if (base && !index && !disp)
    {
      /* esp (for its index) and ebp (for its displacement) need
	 the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
	 code.  */
      if (base == arg_pointer_rtx
	  || base == frame_pointer_rtx
	  || REGNO (base) == SP_REG
	  || REGNO (base) == BP_REG
	  || REGNO (base) == R12_REG
	  || REGNO (base) == R13_REG)
	len++;
    }

  /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
     is not disp32, but disp32(%rip), so for disp32
     SIB byte is needed, unless print_operand_address
     optimizes it into disp32(%rip) or (%rip) is implied
     by UNSPEC.  */
  else if (disp && !base && !index)
    {
      len += 4;
      if (TARGET_64BIT)
	{
	  rtx symbol = disp;

	  if (GET_CODE (disp) == CONST)
	    symbol = XEXP (disp, 0);
	  if (GET_CODE (symbol) == PLUS
	      && CONST_INT_P (XEXP (symbol, 1)))
	    symbol = XEXP (symbol, 0);

	  if (GET_CODE (symbol) != LABEL_REF
	      && (GET_CODE (symbol) != SYMBOL_REF
		  || SYMBOL_REF_TLS_MODEL (symbol) != 0)
	      && (GET_CODE (symbol) != UNSPEC
		  || (XINT (symbol, 1) != UNSPEC_GOTPCREL
		      && XINT (symbol, 1) != UNSPEC_PCREL
		      && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
	    len++;
	}
    }
  else
    {
      /* Find the length of the displacement constant.  */
      if (disp)
	{
	  if (base && satisfies_constraint_K (disp))
	    len += 1;
	  else
	    len += 4;
	}
      /* ebp always wants a displacement.  Similarly r13.  */
      else if (base && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
	len++;

      /* An index requires the two-byte modrm form....  */
      if (index
	  /* ...like esp (or r12), which always wants an index.  */
	  || base == arg_pointer_rtx
	  || base == frame_pointer_rtx
	  || (base && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
	len++;
    }

  return len;
}
/* Compute default value for "length_immediate" attribute.  When SHORTFORM
   is set, expect that the insn has an 8-bit immediate alternative.  */

int
ix86_attr_length_immediate_default (rtx insn, bool shortform)
{
  int len = 0;
  int i;

  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (CONSTANT_P (recog_data.operand[i]))
      {
	enum attr_mode mode = get_attr_mode (insn);

	gcc_assert (!len);
	if (shortform && CONST_INT_P (recog_data.operand[i]))
	  {
	    HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
	    switch (mode)
	      {
	      case MODE_QI:
		len = 1;
		continue;
	      case MODE_HI:
		ival = trunc_int_for_mode (ival, HImode);
		break;
	      case MODE_SI:
		ival = trunc_int_for_mode (ival, SImode);
		break;
	      default:
		break;
	      }
	    if (IN_RANGE (ival, -128, 127))
	      {
		len = 1;
		continue;
	      }
	  }
	switch (mode)
	  {
	  case MODE_QI:
	    len = 1;
	    break;
	  case MODE_HI:
	    len = 2;
	    break;
	  case MODE_SI:
	    len = 4;
	    break;
	  /* Immediates for DImode instructions are encoded
	     as 32-bit sign-extended values.  */
	  case MODE_DI:
	    len = 4;
	    break;
	  default:
	    fatal_insn ("unknown insn mode", insn);
	  }
      }
  return len;
}
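
/* For example, "addl $100, %eax" can use the sign-extended imm8 form
   (83 c0 64 -- immediate length 1), while "addl $1000, %eax" needs the
   imm32 form (05 e8 03 00 00 -- immediate length 4).  */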
/* Compute default value for "length_address" attribute.  */
int
ix86_attr_length_address_default (rtx insn)
{
  int i;

  if (get_attr_type (insn) == TYPE_LEA)
    {
      rtx set = PATTERN (insn), addr;

      if (GET_CODE (set) == PARALLEL)
	set = XVECEXP (set, 0, 0);

      gcc_assert (GET_CODE (set) == SET);

      addr = SET_SRC (set);

      return memory_address_length (addr, true);
    }

  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (MEM_P (recog_data.operand[i]))
      {
	constrain_operands_cached (reload_completed);
	if (which_alternative != -1)
	  {
	    const char *constraints = recog_data.constraints[i];
	    int alt = which_alternative;

	    while (*constraints == '=' || *constraints == '+')
	      constraints++;
	    while (alt-- > 0)
	      while (*constraints++ != ',')
		;
	    /* Skip ignored operands.  */
	    if (*constraints == 'X')
	      continue;
	  }
	return memory_address_length (XEXP (recog_data.operand[i], 0), false);
      }
  return 0;
}
/* Compute default value for "length_vex" attribute.  It includes
   2 or 3 byte VEX prefix and 1 opcode byte.  */

int
ix86_attr_length_vex_default (rtx insn, bool has_0f_opcode, bool has_vex_w)
{
  int i;

  /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit
     requires the 3-byte VEX prefix.  */
  if (!has_0f_opcode || has_vex_w)
    return 3 + 1;

  /* We can always use the 2-byte VEX prefix in 32-bit mode.  */
  if (!TARGET_64BIT)
    return 2 + 1;

  extract_insn_cached (insn);

  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (REG_P (recog_data.operand[i]))
      {
	/* REX.W bit uses 3 byte VEX prefix.  */
	if (GET_MODE (recog_data.operand[i]) == DImode
	    && GENERAL_REG_P (recog_data.operand[i]))
	  return 3 + 1;
      }
    else
      {
	/* REX.X or REX.B bits use 3 byte VEX prefix.  */
	if (MEM_P (recog_data.operand[i])
	    && x86_extended_reg_mentioned_p (recog_data.operand[i]))
	  return 3 + 1;
      }

  return 2 + 1;
}
/* Return the maximum number of instructions a cpu can issue.  */

static int
ix86_issue_rate (void)
{
  switch (ix86_tune)
    {
    case PROCESSOR_PENTIUM:
    case PROCESSOR_ATOM:
    case PROCESSOR_K6:
    case PROCESSOR_BTVER2:
      return 2;

    case PROCESSOR_PENTIUMPRO:
    case PROCESSOR_PENTIUM4:
    case PROCESSOR_CORE2:
    case PROCESSOR_COREI7:
    case PROCESSOR_ATHLON:
    case PROCESSOR_K8:
    case PROCESSOR_AMDFAM10:
    case PROCESSOR_NOCONA:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
    case PROCESSOR_BDVER1:
    case PROCESSOR_BDVER2:
    case PROCESSOR_BDVER3:
    case PROCESSOR_BTVER1:
      return 3;

    default:
      return 1;
    }
}
/* A subroutine of ix86_adjust_cost -- return TRUE iff INSN reads flags set
   by DEP_INSN and nothing set by DEP_INSN.  */

static bool
ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
{
  rtx set, set2;

  /* Simplify the test for uninteresting insns.  */
  if (insn_type != TYPE_SETCC
      && insn_type != TYPE_ICMOV
      && insn_type != TYPE_FCMOV
      && insn_type != TYPE_IBR)
    return false;

  if ((set = single_set (dep_insn)) != 0)
    {
      set = SET_DEST (set);
      set2 = NULL_RTX;
    }
  else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
	   && XVECLEN (PATTERN (dep_insn), 0) == 2
	   && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
	   && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
    {
      set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
    }
  else
    return false;

  if (!REG_P (set) || REGNO (set) != FLAGS_REG)
    return false;

  /* This test is true if the dependent insn reads the flags but
     not any other potentially set register.  */
  if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
    return false;

  if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
    return false;

  return true;
}
/* Return true iff USE_INSN has a memory address with operands set by
   SET_INSN.  */

bool
ix86_agi_dependent (rtx set_insn, rtx use_insn)
{
  int i;
  extract_insn_cached (use_insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (MEM_P (recog_data.operand[i]))
      {
	rtx addr = XEXP (recog_data.operand[i], 0);
	return modified_in_p (addr, set_insn) != 0;
      }
  return false;
}
static int
ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_insn_type;
  enum attr_memory memory;
  rtx set, set2;
  int dep_insn_code_number;

  /* Anti and output dependencies have zero cost on all CPUs.  */
  if (REG_NOTE_KIND (link) != 0)
    return 0;

  dep_insn_code_number = recog_memoized (dep_insn);

  /* If we can't recognize the insns, we can't really do anything.  */
  if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

  switch (ix86_tune)
    {
    case PROCESSOR_PENTIUM:
      /* Address Generation Interlock adds a cycle of latency.  */
      if (insn_type == TYPE_LEA)
	{
	  rtx addr = PATTERN (insn);

	  if (GET_CODE (addr) == PARALLEL)
	    addr = XVECEXP (addr, 0, 0);

	  gcc_assert (GET_CODE (addr) == SET);

	  addr = SET_SRC (addr);
	  if (modified_in_p (addr, dep_insn))
	    cost += 1;
	}
      else if (ix86_agi_dependent (dep_insn, insn))
	cost += 1;

      /* ??? Compares pair with jump/setcc.  */
      if (ix86_flags_dependent (insn, dep_insn, insn_type))
	cost = 0;

      /* Floating point stores require value to be ready one cycle earlier.  */
      if (insn_type == TYPE_FMOV
	  && get_attr_memory (insn) == MEMORY_STORE
	  && !ix86_agi_dependent (dep_insn, insn))
	cost += 1;
      break;

    case PROCESSOR_PENTIUMPRO:
      memory = get_attr_memory (insn);

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
	cost += 5;

      /* There is one cycle extra latency between an FP op and a store.  */
      if (insn_type == TYPE_FMOV
	  && (set = single_set (dep_insn)) != NULL_RTX
	  && (set2 = single_set (insn)) != NULL_RTX
	  && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
	  && MEM_P (SET_DEST (set2)))
	cost += 1;

      /* Show ability of reorder buffer to hide latency of load by executing
	 in parallel with previous instruction in case
	 previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (dep_insn, insn))
	{
	  /* Claim moves to take one cycle, as core can issue one load
	     at a time and the next load can start a cycle later.  */
	  if (dep_insn_type == TYPE_IMOV
	      || dep_insn_type == TYPE_FMOV)
	    cost = 1;
	  else if (cost > 1)
	    cost--;
	}
      break;

    case PROCESSOR_K6:
      memory = get_attr_memory (insn);

      /* The esp dependency is resolved before the instruction is really
	 finished.  */
      if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
	  && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
	return 1;

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
	cost += 5;

      /* Show ability of reorder buffer to hide latency of load by executing
	 in parallel with previous instruction in case
	 previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (dep_insn, insn))
	{
	  /* Claim moves to take one cycle, as core can issue one load
	     at a time and the next load can start a cycle later.  */
	  if (dep_insn_type == TYPE_IMOV
	      || dep_insn_type == TYPE_FMOV)
	    cost = 1;
	  else if (cost > 2)
	    cost -= 2;
	  else
	    cost = 1;
	}
      break;

    case PROCESSOR_ATHLON:
    case PROCESSOR_K8:
    case PROCESSOR_AMDFAM10:
    case PROCESSOR_BDVER1:
    case PROCESSOR_BDVER2:
    case PROCESSOR_BDVER3:
    case PROCESSOR_BTVER1:
    case PROCESSOR_BTVER2:
    case PROCESSOR_ATOM:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
      memory = get_attr_memory (insn);

      /* Show ability of reorder buffer to hide latency of load by executing
	 in parallel with previous instruction in case
	 previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (dep_insn, insn))
	{
	  enum attr_unit unit = get_attr_unit (insn);
	  int loadcost = 3;

	  /* Because of the difference between the length of integer and
	     floating unit pipeline preparation stages, the memory operands
	     for floating point are cheaper.

	     ??? For Athlon the difference is most probably 2.  */
	  if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
	    loadcost = 3;
	  else
	    loadcost = TARGET_ATHLON ? 2 : 0;

	  if (cost >= loadcost)
	    cost -= loadcost;
	  else
	    cost = 0;
	}
      break;

    default:
      break;
    }

  return cost;
}
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */

static int
ia32_multipass_dfa_lookahead (void)
{
  switch (ix86_tune)
    {
    case PROCESSOR_PENTIUM:
      return 2;

    case PROCESSOR_PENTIUMPRO:
    case PROCESSOR_K6:
      return 1;

    case PROCESSOR_CORE2:
    case PROCESSOR_COREI7:
    case PROCESSOR_ATOM:
      /* Generally, we want haifa-sched:max_issue() to look ahead as far
	 as many instructions can be executed on a cycle, i.e.,
	 issue_rate.  I wonder why tuning for many CPUs does not do this.  */
      if (reload_completed)
	return ix86_issue_rate ();
      /* Don't use lookahead for pre-reload schedule to save compile time.  */
      return 0;

    default:
      return 0;
    }
}
/* Try to reorder ready list to take advantage of Atom pipelined IMUL
   execution.  It is applied if
   (1) IMUL instruction is on the top of list;
   (2) There exists the only producer of independent IMUL instruction in
       ready list;
   (3) Put found producer on the top of ready list.
   Returns issue rate.  */

static int
ix86_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
		    int clock_var ATTRIBUTE_UNUSED)
{
  static int issue_rate = -1;
  int n_ready = *pn_ready;
  int index = -1;
  rtx insn, insn1, insn2;
  int i;
  sd_iterator_def sd_it;
  dep_t dep;

  /* Set up issue rate.  */
  issue_rate = ix86_issue_rate ();

  /* Do reordering for Atom only.  */
  if (ix86_tune != PROCESSOR_ATOM)
    return issue_rate;
  /* Do not perform ready list reordering for pre-reload schedule pass.  */
  if (!reload_completed)
    return issue_rate;
  /* Nothing to do if ready list contains only 1 instruction.  */
  if (n_ready <= 1)
    return issue_rate;

  /* Check that IMUL instruction is on the top of ready list.  */
  insn = ready[n_ready - 1];
  if (!NONDEBUG_INSN_P (insn))
    return issue_rate;
  insn = PATTERN (insn);
  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);
  if (GET_CODE (insn) != SET)
    return issue_rate;
  if (!(GET_CODE (SET_SRC (insn)) == MULT
	&& GET_MODE (SET_SRC (insn)) == SImode))
    return issue_rate;

  /* Search for producer of independent IMUL instruction.  */
  for (i = n_ready - 2; i >= 0; i--)
    {
      insn = ready[i];
      if (!NONDEBUG_INSN_P (insn))
	continue;
      /* Skip IMUL instruction.  */
      insn2 = PATTERN (insn);
      if (GET_CODE (insn2) == PARALLEL)
	insn2 = XVECEXP (insn2, 0, 0);
      if (GET_CODE (insn2) == SET
	  && GET_CODE (SET_SRC (insn2)) == MULT
	  && GET_MODE (SET_SRC (insn2)) == SImode)
	continue;

      FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
	{
	  rtx con;
	  con = DEP_CON (dep);
	  if (!NONDEBUG_INSN_P (con))
	    continue;
	  insn1 = PATTERN (con);
	  if (GET_CODE (insn1) == PARALLEL)
	    insn1 = XVECEXP (insn1, 0, 0);

	  if (GET_CODE (insn1) == SET
	      && GET_CODE (SET_SRC (insn1)) == MULT
	      && GET_MODE (SET_SRC (insn1)) == SImode)
	    {
	      sd_iterator_def sd_it1;
	      dep_t dep1;
	      /* Check if there is no other dependee for IMUL.  */
	      index = i;
	      FOR_EACH_DEP (con, SD_LIST_BACK, sd_it1, dep1)
		{
		  rtx pro;
		  pro = DEP_PRO (dep1);
		  if (!NONDEBUG_INSN_P (pro))
		    continue;
		  if (pro != insn)
		    index = -1;
		}
	      if (index >= 0)
		break;
	    }
	}
      if (index >= 0)
	break;
    }
  if (index < 0)
    return issue_rate; /* Didn't find IMUL producer.  */

  if (sched_verbose > 1)
    fprintf (dump, ";;\tatom sched_reorder: swap %d and %d insns\n",
	     INSN_UID (ready[index]), INSN_UID (ready[n_ready - 1]));

  /* Put IMUL producer (ready[index]) at the top of ready list.  */
  insn1 = ready[index];
  for (i = index; i < n_ready - 1; i++)
    ready[i] = ready[i + 1];
  ready[n_ready - 1] = insn1;

  return issue_rate;
}
static bool
ix86_class_likely_spilled_p (reg_class_t);

/* Returns true if the lhs of INSN is a HW function argument register, and
   sets IS_SPILLED to true if it is a likely-spilled HW register.  */
static bool
insn_is_function_arg (rtx insn, bool *is_spilled)
{
  rtx dst;

  if (!NONDEBUG_INSN_P (insn))
    return false;
  /* Call instructions are not movable, ignore them.  */
  if (CALL_P (insn))
    return false;
  insn = PATTERN (insn);
  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);
  if (GET_CODE (insn) != SET)
    return false;
  dst = SET_DEST (insn);
  if (REG_P (dst) && HARD_REGISTER_P (dst)
      && ix86_function_arg_regno_p (REGNO (dst)))
    {
      /* Is it a likely spilled HW register?  */
      if (!TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dst))
	  && ix86_class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dst))))
	*is_spilled = true;
      return true;
    }
  return false;
}
/* Add output dependencies for a chain of adjacent function arguments, but
   only if there is a move to a likely-spilled HW register.  Return the first
   argument if at least one dependence was added, or NULL otherwise.  */
static rtx
add_parameter_dependencies (rtx call, rtx head)
{
  rtx insn;
  rtx last = call;
  rtx first_arg = NULL;
  bool is_spilled = false;

  head = PREV_INSN (head);

  /* Find the argument passing instruction nearest to the call.  */
  while (true)
    {
      last = PREV_INSN (last);
      if (last == head)
	return NULL;
      if (!NONDEBUG_INSN_P (last))
	continue;
      if (insn_is_function_arg (last, &is_spilled))
	break;
      return NULL;
    }

  first_arg = last;
  while (true)
    {
      insn = PREV_INSN (last);
      if (!INSN_P (insn))
	break;
      if (insn == head)
	break;
      if (!NONDEBUG_INSN_P (insn))
	{
	  last = insn;
	  continue;
	}
      if (insn_is_function_arg (insn, &is_spilled))
	{
	  /* Add an output dependence between two function arguments if the
	     chain of output arguments contains likely-spilled HW
	     registers.  */
	  if (is_spilled)
	    add_dependence (last, insn, REG_DEP_OUTPUT);
	  first_arg = last = insn;
	}
      else
	break;
    }
  if (!is_spilled)
    return NULL;
  return first_arg;
}
/* Add an output or anti dependency from INSN to FIRST_ARG to restrict its
   code motion.  */
static void
avoid_func_arg_motion (rtx first_arg, rtx insn)
{
  rtx set;
  rtx tmp;

  set = single_set (insn);
  if (!set)
    return;
  tmp = SET_DEST (set);
  if (REG_P (tmp))
    {
      /* Add output dependency to the first function argument.  */
      add_dependence (first_arg, insn, REG_DEP_OUTPUT);
      return;
    }
  /* Add anti dependency.  */
  add_dependence (first_arg, insn, REG_DEP_ANTI);
}
/* Avoid cross-block motion of a function argument by adding a dependency
   from the first non-jump instruction in BB.  */
static void
add_dependee_for_func_arg (rtx arg, basic_block bb)
{
  rtx insn = BB_END (bb);

  while (insn)
    {
      if (NONDEBUG_INSN_P (insn) && NONJUMP_INSN_P (insn))
	{
	  rtx set = single_set (insn);
	  if (set)
	    {
	      avoid_func_arg_motion (arg, insn);
	      return;
	    }
	}
      if (insn == BB_HEAD (bb))
	return;
      insn = PREV_INSN (insn);
    }
}
/* Hook for pre-reload schedule - avoid motion of function arguments
   passed in likely-spilled HW registers.  */
static void
ix86_dependencies_evaluation_hook (rtx head, rtx tail)
{
  rtx insn;
  rtx first_arg = NULL;
  if (reload_completed)
    return;
  while (head != tail && DEBUG_INSN_P (head))
    head = NEXT_INSN (head);
  for (insn = tail; insn != head; insn = PREV_INSN (insn))
    if (INSN_P (insn) && CALL_P (insn))
      {
	first_arg = add_parameter_dependencies (insn, head);
	if (first_arg)
	  {
	    /* Add a dependee for the first argument to predecessors, but
	       only if the region contains more than one block.  */
	    basic_block bb = BLOCK_FOR_INSN (insn);
	    int rgn = CONTAINING_RGN (bb->index);
	    int nr_blks = RGN_NR_BLOCKS (rgn);
	    /* Skip trivial regions and region head blocks that can have
	       predecessors outside of region.  */
	    if (nr_blks > 1 && BLOCK_TO_BB (bb->index) != 0)
	      {
		edge e;
		edge_iterator ei;
		/* Assume that region is SCC, i.e. all immediate predecessors
		   of non-head block are in the same region.  */
		FOR_EACH_EDGE (e, ei, bb->preds)
		  {
		    /* Avoid creating loop-carried dependencies by
		       using the topological ordering in the region.  */
		    if (BLOCK_TO_BB (bb->index) > BLOCK_TO_BB (e->src->index))
		      add_dependee_for_func_arg (first_arg, e->src);
		  }
	      }
	    insn = first_arg;
	    if (insn == head)
	      break;
	  }
      }
    else if (first_arg)
      avoid_func_arg_motion (first_arg, insn);
}
/* Hook for pre-reload schedule - set priority of moves from likely-spilled
   HW registers to maximum, to schedule them as soon as possible.  These are
   moves from function argument registers at the top of the function entry
   and moves from function return value registers after call.  */
static int
ix86_adjust_priority (rtx insn, int priority)
{
  rtx set;

  if (reload_completed)
    return priority;

  if (!NONDEBUG_INSN_P (insn))
    return priority;

  set = single_set (insn);
  if (set)
    {
      rtx tmp = SET_SRC (set);
      if (REG_P (tmp)
	  && HARD_REGISTER_P (tmp)
	  && !TEST_HARD_REG_BIT (fixed_reg_set, REGNO (tmp))
	  && ix86_class_likely_spilled_p (REGNO_REG_CLASS (REGNO (tmp))))
	return current_sched_info->sched_max_insns_priority;
    }

  return priority;
}
/* Model decoder of Core 2/i7.
   Below hooks for multipass scheduling (see haifa-sched.c:max_issue)
   track the instruction fetch block boundaries and make sure that long
   (9+ bytes) instructions are assigned to D0.  */

/* Maximum length of an insn that can be handled by
   a secondary decoder unit.  '8' for Core 2/i7.  */
static int core2i7_secondary_decoder_max_insn_size;

/* Ifetch block size, i.e., number of bytes decoder reads per cycle.
   '16' for Core 2/i7.  */
static int core2i7_ifetch_block_size;

/* Maximum number of instructions decoder can handle per cycle.
   '6' for Core 2/i7.  */
static int core2i7_ifetch_block_max_insns;

typedef struct ix86_first_cycle_multipass_data_ *
  ix86_first_cycle_multipass_data_t;
typedef const struct ix86_first_cycle_multipass_data_ *
  const_ix86_first_cycle_multipass_data_t;

/* A variable to store target state across calls to max_issue within
   one cycle.  */
static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
  *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
/* Initialize DATA.  */
static void
core2i7_first_cycle_multipass_init (void *_data)
{
  ix86_first_cycle_multipass_data_t data
    = (ix86_first_cycle_multipass_data_t) _data;

  data->ifetch_block_len = 0;
  data->ifetch_block_n_insns = 0;
  data->ready_try_change = NULL;
  data->ready_try_change_size = 0;
}
/* Advancing the cycle; reset ifetch block counts.  */
static void
core2i7_dfa_post_advance_cycle (void)
{
  ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;

  gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);

  data->ifetch_block_len = 0;
  data->ifetch_block_n_insns = 0;
}
static int min_insn_size (rtx);

/* Filter out insns from ready_try that the core will not be able to issue
   on the current cycle due to decoder restrictions.  */
static void
core2i7_first_cycle_multipass_filter_ready_try
(const_ix86_first_cycle_multipass_data_t data,
 char *ready_try, int n_ready, bool first_cycle_insn_p)
{
  while (n_ready--)
    {
      rtx insn;
      int insn_size;

      if (ready_try[n_ready])
	continue;

      insn = get_ready_element (n_ready);
      insn_size = min_insn_size (insn);

      if (/* If this is too long an insn for a secondary decoder ...  */
	  (!first_cycle_insn_p
	   && insn_size > core2i7_secondary_decoder_max_insn_size)
	  /* ... or it would not fit into the ifetch block ...  */
	  || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
	  /* ... or the decoder is full already ...  */
	  || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
	/* ... mask the insn out.  */
	{
	  ready_try[n_ready] = 1;

	  if (data->ready_try_change)
	    bitmap_set_bit (data->ready_try_change, n_ready);
	}
    }
}
24746 core2i7_first_cycle_multipass_begin (void *_data
, char *ready_try
, int n_ready
,
24747 bool first_cycle_insn_p
)
24749 ix86_first_cycle_multipass_data_t data
24750 = (ix86_first_cycle_multipass_data_t
) _data
;
24751 const_ix86_first_cycle_multipass_data_t prev_data
24752 = ix86_first_cycle_multipass_data
;
24754 /* Restore the state from the end of the previous round. */
24755 data
->ifetch_block_len
= prev_data
->ifetch_block_len
;
24756 data
->ifetch_block_n_insns
= prev_data
->ifetch_block_n_insns
;
24758 /* Filter instructions that cannot be issued on current cycle due to
24759 decoder restrictions. */
24760 core2i7_first_cycle_multipass_filter_ready_try (data
, ready_try
, n_ready
,
24761 first_cycle_insn_p
);
/* INSN is being issued in current solution.  Account for its impact on
   the decoder model.  */
static void
core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
				     rtx insn, const void *_prev_data)
{
  ix86_first_cycle_multipass_data_t data
    = (ix86_first_cycle_multipass_data_t) _data;
  const_ix86_first_cycle_multipass_data_t prev_data
    = (const_ix86_first_cycle_multipass_data_t) _prev_data;

  int insn_size = min_insn_size (insn);

  data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
  data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
  gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
	      && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);

  /* Allocate or resize the bitmap for storing INSN's effect on ready_try.  */
  if (!data->ready_try_change)
    {
      data->ready_try_change = sbitmap_alloc (n_ready);
      data->ready_try_change_size = n_ready;
    }
  else if (data->ready_try_change_size < n_ready)
    {
      data->ready_try_change = sbitmap_resize (data->ready_try_change,
					       n_ready, 0);
      data->ready_try_change_size = n_ready;
    }
  bitmap_clear (data->ready_try_change);

  /* Filter out insns from ready_try that the core will not be able to issue
     on current cycle due to decoder.  */
  core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
						  false);
}
/* Revert the effect on ready_try.  */
static void
core2i7_first_cycle_multipass_backtrack (const void *_data,
					 char *ready_try,
					 int n_ready ATTRIBUTE_UNUSED)
{
  const_ix86_first_cycle_multipass_data_t data
    = (const_ix86_first_cycle_multipass_data_t) _data;
  unsigned int i = 0;
  sbitmap_iterator sbi;

  gcc_assert (bitmap_last_set_bit (data->ready_try_change) < n_ready);
  EXECUTE_IF_SET_IN_BITMAP (data->ready_try_change, 0, i, sbi)
    {
      ready_try[i] = 0;
    }
}
/* Save the result of multipass lookahead scheduling for the next round.  */
static void
core2i7_first_cycle_multipass_end (const void *_data)
{
  const_ix86_first_cycle_multipass_data_t data
    = (const_ix86_first_cycle_multipass_data_t) _data;
  ix86_first_cycle_multipass_data_t next_data
    = ix86_first_cycle_multipass_data;

  if (data != NULL)
    {
      next_data->ifetch_block_len = data->ifetch_block_len;
      next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
    }
}
/* Deallocate target data.  */
static void
core2i7_first_cycle_multipass_fini (void *_data)
{
  ix86_first_cycle_multipass_data_t data
    = (ix86_first_cycle_multipass_data_t) _data;

  if (data->ready_try_change)
    {
      sbitmap_free (data->ready_try_change);
      data->ready_try_change = NULL;
      data->ready_try_change_size = 0;
    }
}
/* Prepare for scheduling pass.  */
static void
ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
			int verbose ATTRIBUTE_UNUSED,
			int max_uid ATTRIBUTE_UNUSED)
{
  /* Install scheduling hooks for current CPU.  Some of these hooks are used
     in time-critical parts of the scheduler, so we only set them up when
     they are actually used.  */
  switch (ix86_tune)
    {
    case PROCESSOR_CORE2:
    case PROCESSOR_COREI7:
      /* Do not perform multipass scheduling for pre-reload schedule
	 to save compile time.  */
      if (reload_completed)
	{
	  targetm.sched.dfa_post_advance_cycle
	    = core2i7_dfa_post_advance_cycle;
	  targetm.sched.first_cycle_multipass_init
	    = core2i7_first_cycle_multipass_init;
	  targetm.sched.first_cycle_multipass_begin
	    = core2i7_first_cycle_multipass_begin;
	  targetm.sched.first_cycle_multipass_issue
	    = core2i7_first_cycle_multipass_issue;
	  targetm.sched.first_cycle_multipass_backtrack
	    = core2i7_first_cycle_multipass_backtrack;
	  targetm.sched.first_cycle_multipass_end
	    = core2i7_first_cycle_multipass_end;
	  targetm.sched.first_cycle_multipass_fini
	    = core2i7_first_cycle_multipass_fini;

	  /* Set decoder parameters.  */
	  core2i7_secondary_decoder_max_insn_size = 8;
	  core2i7_ifetch_block_size = 16;
	  core2i7_ifetch_block_max_insns = 6;
	  break;
	}
      /* ... Fall through ...  */
    default:
      targetm.sched.dfa_post_advance_cycle = NULL;
      targetm.sched.first_cycle_multipass_init = NULL;
      targetm.sched.first_cycle_multipass_begin = NULL;
      targetm.sched.first_cycle_multipass_issue = NULL;
      targetm.sched.first_cycle_multipass_backtrack = NULL;
      targetm.sched.first_cycle_multipass_end = NULL;
      targetm.sched.first_cycle_multipass_fini = NULL;
      break;
    }
}
/* Compute the alignment given to a constant that is being placed in memory.
   EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.
   The value of this function is used instead of that alignment to align
   the object.  */

int
ix86_constant_alignment (tree exp, int align)
{
  if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
      || TREE_CODE (exp) == INTEGER_CST)
    {
      if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
	return 64;
      else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
	return 128;
    }
  else if (!optimize_size && TREE_CODE (exp) == STRING_CST
	   && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
    return BITS_PER_WORD;

  return align;
}
/* Compute the alignment for a static variable.
   TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this function is used
   instead of that alignment to align the object.  */

int
ix86_data_alignment (tree type, int align)
{
  int max_align
    = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);

  if (AGGREGATE_TYPE_P (type)
      && TYPE_SIZE (type)
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
	  || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
      && align < max_align)
    align = max_align;

  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
  if (TARGET_64BIT)
    {
      if (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
	      || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
	return 128;
    }
) == ARRAY_TYPE
)
24959 if (TYPE_MODE (TREE_TYPE (type
)) == DFmode
&& align
< 64)
24961 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type
))) && align
< 128)
24964 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
24967 if (TYPE_MODE (type
) == DCmode
&& align
< 64)
24969 if ((TYPE_MODE (type
) == XCmode
24970 || TYPE_MODE (type
) == TCmode
) && align
< 128)
24973 else if ((TREE_CODE (type
) == RECORD_TYPE
24974 || TREE_CODE (type
) == UNION_TYPE
24975 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
24976 && TYPE_FIELDS (type
))
24978 if (DECL_MODE (TYPE_FIELDS (type
)) == DFmode
&& align
< 64)
24980 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type
))) && align
< 128)
24983 else if (TREE_CODE (type
) == REAL_TYPE
|| TREE_CODE (type
) == VECTOR_TYPE
24984 || TREE_CODE (type
) == INTEGER_TYPE
)
24986 if (TYPE_MODE (type
) == DFmode
&& align
< 64)
24988 if (ALIGN_MODE_128 (TYPE_MODE (type
)) && align
< 128)
/* Compute the alignment for a local variable or a stack slot.  EXP is
   the data type or decl itself, MODE is the widest mode available and
   ALIGN is the alignment that the object would ordinarily have.  The
   value of this macro is used instead of that alignment to align the
   object.  */

unsigned int
ix86_local_alignment (tree exp, enum machine_mode mode,
		      unsigned int align)
{
  tree type, decl;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if (!TARGET_64BIT
      && align == 64
      && ix86_preferred_stack_boundary < 64
      && (mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || !TYPE_USER_ALIGN (type))
      && (!decl || !DECL_USER_ALIGN (decl)))
    align = 32;

  /* If TYPE is NULL, we are allocating a stack slot for caller-save
     register in MODE.  We will return the largest alignment of XF
     and DF.  */
  if (!type)
    {
      if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
	align = GET_MODE_ALIGNMENT (DFmode);
      return align;
    }

  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  Exact wording is:

     An array uses the same alignment as its elements, except that a local or
     global array variable of length at least 16 bytes or
     a C99 variable-length array variable always has alignment of at least 16 bytes.

     This was added to allow use of aligned SSE instructions at arrays.  This
     rule is meant for static storage (where the compiler cannot do the
     analysis by itself).  We follow it for automatic variables only when
     convenient.  We fully control everything in the function compiled, and
     functions from other units cannot rely on the alignment.

     Exclude va_list type.  It is the common case of local array where
     we cannot benefit from the alignment.  */
  if (TARGET_64BIT && optimize_function_for_speed_p (cfun))
    {
      if (AGGREGATE_TYPE_P (type)
	  && (va_list_type_node == NULL_TREE
	      || (TYPE_MAIN_VARIANT (type)
		  != TYPE_MAIN_VARIANT (va_list_type_node)))
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
	      || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
	return 128;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      if (TYPE_MODE (type) == DCmode && align < 64)
	return 64;
      if ((TYPE_MODE (type) == XCmode
	   || TYPE_MODE (type) == TCmode) && align < 128)
	return 128;
    }
  else if ((TREE_CODE (type) == RECORD_TYPE
	    || TREE_CODE (type) == UNION_TYPE
	    || TREE_CODE (type) == QUAL_UNION_TYPE)
	   && TYPE_FIELDS (type))
    {
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
	   || TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_MODE (type) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
	return 128;
    }

  return align;
}
/* Compute the minimum required alignment for dynamic stack realignment
   purposes for a local variable, parameter or a stack slot.  EXP is
   the data type or decl itself, MODE is its mode and ALIGN is the
   alignment that the object would ordinarily have.  */

unsigned int
ix86_minimum_alignment (tree exp, enum machine_mode mode,
			unsigned int align)
{
  tree type, decl;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
    return align;

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || !TYPE_USER_ALIGN (type))
      && (!decl || !DECL_USER_ALIGN (decl)))
    return 32;

  return align;
}
/* Find a location for the static chain incoming to a nested function.
   This is a register, unless all free registers are used by arguments.  */

static rtx
ix86_static_chain (const_tree fndecl, bool incoming_p)
{
  unsigned regno;

  if (!DECL_STATIC_CHAIN (fndecl))
    return NULL;

  if (TARGET_64BIT)
    {
      /* We always use R10 in 64-bit mode.  */
      regno = R10_REG;
    }
  else
    {
      tree fntype;
      unsigned int ccvt;

      /* By default in 32-bit mode we use ECX to pass the static chain.  */
      regno = CX_REG;

      fntype = TREE_TYPE (fndecl);
      ccvt = ix86_get_callcvt (fntype);
      if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	{
	  /* Fastcall functions use ecx/edx for arguments, which leaves
	     us with EAX for the static chain.
	     Thiscall functions use ecx for arguments, which also
	     leaves us with EAX for the static chain.  */
	  regno = AX_REG;
	}
      else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	{
	  /* Thiscall functions use ecx for arguments, which leaves
	     us with EAX and EDX for the static chain.
	     For ABI compatibility we use EAX.  */
	  regno = AX_REG;
	}
      else if (ix86_function_regparm (fntype, fndecl) == 3)
	{
	  /* For regparm 3, we have no free call-clobbered registers in
	     which to store the static chain.  In order to implement this,
	     we have the trampoline push the static chain to the stack.
	     However, we can't push a value below the return address when
	     we call the nested function directly, so we have to use an
	     alternate entry point.  For this we use ESI, and have the
	     alternate entry point push ESI, so that things appear the
	     same once we're executing the nested function.  */
	  if (incoming_p)
	    {
	      if (fndecl == current_function_decl)
		ix86_static_chain_on_stack = true;
	      return gen_frame_mem (SImode,
				    plus_constant (Pmode,
						   arg_pointer_rtx, -8));
	    }
	  regno = SI_REG;
	}
    }

  return gen_rtx_REG (Pmode, regno);
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNDECL is the decl of the target address; M_TRAMP is a MEM for
   the trampoline, and CHAIN_VALUE is an RTX for the static chain
   to be passed to the target function.  */

static void
ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, fnaddr;
  int opcode;
  int offset = 0;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  if (TARGET_64BIT)
    {
      int size;

      /* Load the function address to r11.  Try to load address using
	 the shorter movl instead of movabs.  We may want to support
	 movq for kernel mode, but kernel does not use trampolines at
	 the moment.  FNADDR is a 32bit address and may not be in
	 DImode when ptr_mode == SImode.  Always use movl in this
	 case.  */
      if (ptr_mode == SImode
	  || x86_64_zext_immediate_operand (fnaddr, VOIDmode))
	{
	  fnaddr = copy_addr_to_reg (fnaddr);

	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb41, HImode));

	  mem = adjust_address (m_tramp, SImode, offset + 2);
	  emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
	  offset += 6;
	}
      else
	{
	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb49, HImode));

	  mem = adjust_address (m_tramp, DImode, offset + 2);
	  emit_move_insn (mem, fnaddr);
	  offset += 10;
	}
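
      /* For reference, the HImode stores above write little-endian byte
	 pairs: 0xbb41 becomes "41 bb" (movl $imm32, %r11d) and 0xbb49
	 becomes "49 bb" (movabsq $imm64, %r11); the store that follows
	 each pair supplies the immediate.  */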
      /* Load static chain using movabs to r10.  Use the shorter movl
	 instead of movabs when ptr_mode == SImode.  */
      if (ptr_mode == SImode)
	{
	  opcode = 0xba41;
	  size = 6;
	}
      else
	{
	  opcode = 0xba49;
	  size = 10;
	}

      mem = adjust_address (m_tramp, HImode, offset);
      emit_move_insn (mem, gen_int_mode (opcode, HImode));

      mem = adjust_address (m_tramp, ptr_mode, offset + 2);
      emit_move_insn (mem, chain_value);
      offset += size;

      /* Jump to r11; the last (unused) byte is a nop, only there to
	 pad the write out to a single 32-bit store.  */
      mem = adjust_address (m_tramp, SImode, offset);
      emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
      offset += 4;
    }
  else
    {
      rtx disp, chain;

      /* Depending on the static chain location, either load a register
	 with a constant, or push the constant to the stack.  All of the
	 instructions are the same size.  */
      chain = ix86_static_chain (fndecl, true);
      if (REG_P (chain))
	{
	  switch (REGNO (chain))
	    {
	    case AX_REG:
	      opcode = 0xb8; break;
	    case CX_REG:
	      opcode = 0xb9; break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else
	opcode = 0x68;

      mem = adjust_address (m_tramp, QImode, offset);
      emit_move_insn (mem, gen_int_mode (opcode, QImode));

      mem = adjust_address (m_tramp, SImode, offset + 1);
      emit_move_insn (mem, chain_value);
      offset += 5;

      mem = adjust_address (m_tramp, QImode, offset);
      emit_move_insn (mem, gen_int_mode (0xe9, QImode));

      mem = adjust_address (m_tramp, SImode, offset + 1);

      /* Compute offset from the end of the jmp to the target function.
	 In the case in which the trampoline stores the static chain on
	 the stack, we need to skip the first insn which pushes the
	 (call-saved) register static chain; this push is 1 byte.  */
      offset += 5;
      disp = expand_binop (SImode, sub_optab, fnaddr,
			   plus_constant (Pmode, XEXP (m_tramp, 0),
					  offset - (MEM_P (chain) ? 1 : 0)),
			   NULL_RTX, 1, OPTAB_DIRECT);
      emit_move_insn (mem, disp);
    }

  gcc_assert (offset <= TRAMPOLINE_SIZE);

#ifdef HAVE_ENABLE_EXECUTE_STACK
#ifdef CHECK_EXECUTE_STACK_ENABLED
  if (CHECK_EXECUTE_STACK_ENABLED)
#endif
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
/* The following file contains several enumerations and data structures
   built from the definitions in i386-builtin-types.def.  */

#include "i386-builtin-types.inc"

/* Table for the ix86 builtin non-function types.  */
static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];

/* Retrieve an element from the above table, building some of
   the types lazily.  */

static tree
ix86_get_builtin_type (enum ix86_builtin_type tcode)
{
  unsigned int index;
  tree type, itype;
  int quals;

  gcc_assert ((unsigned) tcode < ARRAY_SIZE (ix86_builtin_type_tab));

  type = ix86_builtin_type_tab[(int) tcode];
  if (type != NULL)
    return type;

  gcc_assert (tcode > IX86_BT_LAST_PRIM);
  if (tcode <= IX86_BT_LAST_VECT)
    {
      enum machine_mode mode;

      index = tcode - IX86_BT_LAST_PRIM - 1;
      itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
      mode = ix86_builtin_type_vect_mode[index];

      type = build_vector_type_for_mode (itype, mode);
    }
  else
    {
      index = tcode - IX86_BT_LAST_VECT - 1;
      if (tcode <= IX86_BT_LAST_PTR)
	quals = TYPE_UNQUALIFIED;
      else
	quals = TYPE_QUAL_CONST;

      itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
      if (quals != TYPE_UNQUALIFIED)
	itype = build_qualified_type (itype, quals);

      type = build_pointer_type (itype);
    }

  ix86_builtin_type_tab[(int) tcode] = type;
  return type;
}
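
/* For instance, a call such as ix86_get_builtin_type (IX86_BT_V4SF)
   (the V4SF code generated into i386-builtin-types.inc from
   i386-builtin-types.def) takes the vector branch above on first use
   and caches the resulting type; later calls return the cached tree.  */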
/* Table for the ix86 builtin function types.  */
static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];

/* Retrieve an element from the above table, building some of
   the types lazily.  */

static tree
ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
{
  tree type;

  gcc_assert ((unsigned) tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));

  type = ix86_builtin_func_type_tab[(int) tcode];
  if (type != NULL)
    return type;

  if (tcode <= IX86_BT_LAST_FUNC)
    {
      unsigned start = ix86_builtin_func_start[(int) tcode];
      unsigned after = ix86_builtin_func_start[(int) tcode + 1];
      tree rtype, atype, args = void_list_node;
      unsigned i;

      rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
      for (i = after - 1; i > start; --i)
	{
	  atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
	  args = tree_cons (NULL, atype, args);
	}

      type = build_function_type (rtype, args);
    }
  else
    {
      unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
      enum ix86_builtin_func_type icode;

      icode = ix86_builtin_func_alias_base[index];
      type = ix86_get_builtin_func_type (icode);
    }

  ix86_builtin_func_type_tab[(int) tcode] = type;
  return type;
}
/* Codes for all the SSE/MMX builtins.  */
enum ix86_builtins
{
  IX86_BUILTIN_ADDPS,
  IX86_BUILTIN_ADDSS,
  IX86_BUILTIN_DIVPS,
  IX86_BUILTIN_DIVSS,
  IX86_BUILTIN_MULPS,
  IX86_BUILTIN_MULSS,
  IX86_BUILTIN_SUBPS,
  IX86_BUILTIN_SUBSS,

  IX86_BUILTIN_CMPEQPS,
  IX86_BUILTIN_CMPLTPS,
  IX86_BUILTIN_CMPLEPS,
  IX86_BUILTIN_CMPGTPS,
  IX86_BUILTIN_CMPGEPS,
  IX86_BUILTIN_CMPNEQPS,
  IX86_BUILTIN_CMPNLTPS,
  IX86_BUILTIN_CMPNLEPS,
  IX86_BUILTIN_CMPNGTPS,
  IX86_BUILTIN_CMPNGEPS,
  IX86_BUILTIN_CMPORDPS,
  IX86_BUILTIN_CMPUNORDPS,
  IX86_BUILTIN_CMPEQSS,
  IX86_BUILTIN_CMPLTSS,
  IX86_BUILTIN_CMPLESS,
  IX86_BUILTIN_CMPNEQSS,
  IX86_BUILTIN_CMPNLTSS,
  IX86_BUILTIN_CMPNLESS,
  IX86_BUILTIN_CMPNGTSS,
  IX86_BUILTIN_CMPNGESS,
  IX86_BUILTIN_CMPORDSS,
  IX86_BUILTIN_CMPUNORDSS,

  IX86_BUILTIN_COMIEQSS,
  IX86_BUILTIN_COMILTSS,
  IX86_BUILTIN_COMILESS,
  IX86_BUILTIN_COMIGTSS,
  IX86_BUILTIN_COMIGESS,
  IX86_BUILTIN_COMINEQSS,
  IX86_BUILTIN_UCOMIEQSS,
  IX86_BUILTIN_UCOMILTSS,
  IX86_BUILTIN_UCOMILESS,
  IX86_BUILTIN_UCOMIGTSS,
  IX86_BUILTIN_UCOMIGESS,
  IX86_BUILTIN_UCOMINEQSS,

  IX86_BUILTIN_CVTPI2PS,
  IX86_BUILTIN_CVTPS2PI,
  IX86_BUILTIN_CVTSI2SS,
  IX86_BUILTIN_CVTSI642SS,
  IX86_BUILTIN_CVTSS2SI,
  IX86_BUILTIN_CVTSS2SI64,
  IX86_BUILTIN_CVTTPS2PI,
  IX86_BUILTIN_CVTTSS2SI,
  IX86_BUILTIN_CVTTSS2SI64,

  IX86_BUILTIN_MAXPS,
  IX86_BUILTIN_MAXSS,
  IX86_BUILTIN_MINPS,
  IX86_BUILTIN_MINSS,

  IX86_BUILTIN_LOADUPS,
  IX86_BUILTIN_STOREUPS,
  IX86_BUILTIN_MOVSS,

  IX86_BUILTIN_MOVHLPS,
  IX86_BUILTIN_MOVLHPS,
  IX86_BUILTIN_LOADHPS,
  IX86_BUILTIN_LOADLPS,
  IX86_BUILTIN_STOREHPS,
  IX86_BUILTIN_STORELPS,

  IX86_BUILTIN_MASKMOVQ,
  IX86_BUILTIN_MOVMSKPS,
  IX86_BUILTIN_PMOVMSKB,

  IX86_BUILTIN_MOVNTPS,
  IX86_BUILTIN_MOVNTQ,

  IX86_BUILTIN_LOADDQU,
  IX86_BUILTIN_STOREDQU,

  IX86_BUILTIN_PACKSSWB,
  IX86_BUILTIN_PACKSSDW,
  IX86_BUILTIN_PACKUSWB,

  IX86_BUILTIN_PADDB,
  IX86_BUILTIN_PADDW,
  IX86_BUILTIN_PADDD,
  IX86_BUILTIN_PADDQ,
  IX86_BUILTIN_PADDSB,
  IX86_BUILTIN_PADDSW,
  IX86_BUILTIN_PADDUSB,
  IX86_BUILTIN_PADDUSW,
  IX86_BUILTIN_PSUBB,
  IX86_BUILTIN_PSUBW,
  IX86_BUILTIN_PSUBD,
  IX86_BUILTIN_PSUBQ,
  IX86_BUILTIN_PSUBSB,
  IX86_BUILTIN_PSUBSW,
  IX86_BUILTIN_PSUBUSB,
  IX86_BUILTIN_PSUBUSW,

  IX86_BUILTIN_PAND,
  IX86_BUILTIN_PANDN,
  IX86_BUILTIN_POR,
  IX86_BUILTIN_PXOR,

  IX86_BUILTIN_PAVGB,
  IX86_BUILTIN_PAVGW,

  IX86_BUILTIN_PCMPEQB,
  IX86_BUILTIN_PCMPEQW,
  IX86_BUILTIN_PCMPEQD,
  IX86_BUILTIN_PCMPGTB,
  IX86_BUILTIN_PCMPGTW,
  IX86_BUILTIN_PCMPGTD,

  IX86_BUILTIN_PMADDWD,

  IX86_BUILTIN_PMAXSW,
  IX86_BUILTIN_PMAXUB,
  IX86_BUILTIN_PMINSW,
  IX86_BUILTIN_PMINUB,

  IX86_BUILTIN_PMULHUW,
  IX86_BUILTIN_PMULHW,
  IX86_BUILTIN_PMULLW,

  IX86_BUILTIN_PSADBW,
  IX86_BUILTIN_PSHUFW,

  IX86_BUILTIN_PSLLW,
  IX86_BUILTIN_PSLLD,
  IX86_BUILTIN_PSLLQ,
  IX86_BUILTIN_PSRAW,
  IX86_BUILTIN_PSRAD,
  IX86_BUILTIN_PSRLW,
  IX86_BUILTIN_PSRLD,
  IX86_BUILTIN_PSRLQ,
  IX86_BUILTIN_PSLLWI,
  IX86_BUILTIN_PSLLDI,
  IX86_BUILTIN_PSLLQI,
  IX86_BUILTIN_PSRAWI,
  IX86_BUILTIN_PSRADI,
  IX86_BUILTIN_PSRLWI,
  IX86_BUILTIN_PSRLDI,
  IX86_BUILTIN_PSRLQI,

  IX86_BUILTIN_PUNPCKHBW,
  IX86_BUILTIN_PUNPCKHWD,
  IX86_BUILTIN_PUNPCKHDQ,
  IX86_BUILTIN_PUNPCKLBW,
  IX86_BUILTIN_PUNPCKLWD,
  IX86_BUILTIN_PUNPCKLDQ,

  IX86_BUILTIN_SHUFPS,

  IX86_BUILTIN_RCPPS,
  IX86_BUILTIN_RCPSS,
  IX86_BUILTIN_RSQRTPS,
  IX86_BUILTIN_RSQRTPS_NR,
  IX86_BUILTIN_RSQRTSS,
  IX86_BUILTIN_RSQRTF,
  IX86_BUILTIN_SQRTPS,
  IX86_BUILTIN_SQRTPS_NR,
  IX86_BUILTIN_SQRTSS,

  IX86_BUILTIN_UNPCKHPS,
  IX86_BUILTIN_UNPCKLPS,

  IX86_BUILTIN_ANDPS,
  IX86_BUILTIN_ANDNPS,
  IX86_BUILTIN_ORPS,
  IX86_BUILTIN_XORPS,

  IX86_BUILTIN_EMMS,
  IX86_BUILTIN_LDMXCSR,
  IX86_BUILTIN_STMXCSR,
  IX86_BUILTIN_SFENCE,

  IX86_BUILTIN_FXSAVE,
  IX86_BUILTIN_FXRSTOR,
  IX86_BUILTIN_FXSAVE64,
  IX86_BUILTIN_FXRSTOR64,

  IX86_BUILTIN_XSAVE,
  IX86_BUILTIN_XRSTOR,
  IX86_BUILTIN_XSAVE64,
  IX86_BUILTIN_XRSTOR64,

  IX86_BUILTIN_XSAVEOPT,
  IX86_BUILTIN_XSAVEOPT64,

  /* 3DNow! Original */
  IX86_BUILTIN_FEMMS,
  IX86_BUILTIN_PAVGUSB,
  IX86_BUILTIN_PF2ID,
  IX86_BUILTIN_PFACC,
  IX86_BUILTIN_PFADD,
  IX86_BUILTIN_PFCMPEQ,
  IX86_BUILTIN_PFCMPGE,
  IX86_BUILTIN_PFCMPGT,
  IX86_BUILTIN_PFMAX,
  IX86_BUILTIN_PFMIN,
  IX86_BUILTIN_PFMUL,
  IX86_BUILTIN_PFRCP,
  IX86_BUILTIN_PFRCPIT1,
  IX86_BUILTIN_PFRCPIT2,
  IX86_BUILTIN_PFRSQIT1,
  IX86_BUILTIN_PFRSQRT,
  IX86_BUILTIN_PFSUB,
  IX86_BUILTIN_PFSUBR,
  IX86_BUILTIN_PI2FD,
  IX86_BUILTIN_PMULHRW,

  /* 3DNow! Athlon Extensions */
  IX86_BUILTIN_PF2IW,
  IX86_BUILTIN_PFNACC,
  IX86_BUILTIN_PFPNACC,
  IX86_BUILTIN_PI2FW,
  IX86_BUILTIN_PSWAPDSI,
  IX86_BUILTIN_PSWAPDSF,

  /* SSE2 */
  IX86_BUILTIN_ADDPD,
  IX86_BUILTIN_ADDSD,
  IX86_BUILTIN_DIVPD,
  IX86_BUILTIN_DIVSD,
  IX86_BUILTIN_MULPD,
  IX86_BUILTIN_MULSD,
  IX86_BUILTIN_SUBPD,
  IX86_BUILTIN_SUBSD,

  IX86_BUILTIN_CMPEQPD,
  IX86_BUILTIN_CMPLTPD,
  IX86_BUILTIN_CMPLEPD,
  IX86_BUILTIN_CMPGTPD,
  IX86_BUILTIN_CMPGEPD,
  IX86_BUILTIN_CMPNEQPD,
  IX86_BUILTIN_CMPNLTPD,
  IX86_BUILTIN_CMPNLEPD,
  IX86_BUILTIN_CMPNGTPD,
  IX86_BUILTIN_CMPNGEPD,
  IX86_BUILTIN_CMPORDPD,
  IX86_BUILTIN_CMPUNORDPD,
  IX86_BUILTIN_CMPEQSD,
  IX86_BUILTIN_CMPLTSD,
  IX86_BUILTIN_CMPLESD,
  IX86_BUILTIN_CMPNEQSD,
  IX86_BUILTIN_CMPNLTSD,
  IX86_BUILTIN_CMPNLESD,
  IX86_BUILTIN_CMPORDSD,
  IX86_BUILTIN_CMPUNORDSD,

  IX86_BUILTIN_COMIEQSD,
  IX86_BUILTIN_COMILTSD,
  IX86_BUILTIN_COMILESD,
  IX86_BUILTIN_COMIGTSD,
  IX86_BUILTIN_COMIGESD,
  IX86_BUILTIN_COMINEQSD,
  IX86_BUILTIN_UCOMIEQSD,
  IX86_BUILTIN_UCOMILTSD,
  IX86_BUILTIN_UCOMILESD,
  IX86_BUILTIN_UCOMIGTSD,
  IX86_BUILTIN_UCOMIGESD,
  IX86_BUILTIN_UCOMINEQSD,

  IX86_BUILTIN_MAXPD,
  IX86_BUILTIN_MAXSD,
  IX86_BUILTIN_MINPD,
  IX86_BUILTIN_MINSD,

  IX86_BUILTIN_ANDPD,
  IX86_BUILTIN_ANDNPD,
  IX86_BUILTIN_ORPD,
  IX86_BUILTIN_XORPD,

  IX86_BUILTIN_SQRTPD,
  IX86_BUILTIN_SQRTSD,

  IX86_BUILTIN_UNPCKHPD,
  IX86_BUILTIN_UNPCKLPD,

  IX86_BUILTIN_SHUFPD,

  IX86_BUILTIN_LOADUPD,
  IX86_BUILTIN_STOREUPD,
  IX86_BUILTIN_MOVSD,

  IX86_BUILTIN_LOADHPD,
  IX86_BUILTIN_LOADLPD,

  IX86_BUILTIN_CVTDQ2PD,
  IX86_BUILTIN_CVTDQ2PS,

  IX86_BUILTIN_CVTPD2DQ,
  IX86_BUILTIN_CVTPD2PI,
  IX86_BUILTIN_CVTPD2PS,
  IX86_BUILTIN_CVTTPD2DQ,
  IX86_BUILTIN_CVTTPD2PI,

  IX86_BUILTIN_CVTPI2PD,
  IX86_BUILTIN_CVTSI2SD,
  IX86_BUILTIN_CVTSI642SD,

  IX86_BUILTIN_CVTSD2SI,
  IX86_BUILTIN_CVTSD2SI64,
  IX86_BUILTIN_CVTSD2SS,
  IX86_BUILTIN_CVTSS2SD,
  IX86_BUILTIN_CVTTSD2SI,
  IX86_BUILTIN_CVTTSD2SI64,

  IX86_BUILTIN_CVTPS2DQ,
  IX86_BUILTIN_CVTPS2PD,
  IX86_BUILTIN_CVTTPS2DQ,

  IX86_BUILTIN_MOVNTI,
  IX86_BUILTIN_MOVNTI64,
  IX86_BUILTIN_MOVNTPD,
  IX86_BUILTIN_MOVNTDQ,

  IX86_BUILTIN_MOVQ128,

  /* SSE2 MMX */
  IX86_BUILTIN_MASKMOVDQU,
  IX86_BUILTIN_MOVMSKPD,
  IX86_BUILTIN_PMOVMSKB128,

  IX86_BUILTIN_PACKSSWB128,
  IX86_BUILTIN_PACKSSDW128,
  IX86_BUILTIN_PACKUSWB128,

  IX86_BUILTIN_PADDB128,
  IX86_BUILTIN_PADDW128,
  IX86_BUILTIN_PADDD128,
  IX86_BUILTIN_PADDQ128,
  IX86_BUILTIN_PADDSB128,
  IX86_BUILTIN_PADDSW128,
  IX86_BUILTIN_PADDUSB128,
  IX86_BUILTIN_PADDUSW128,
  IX86_BUILTIN_PSUBB128,
  IX86_BUILTIN_PSUBW128,
  IX86_BUILTIN_PSUBD128,
  IX86_BUILTIN_PSUBQ128,
  IX86_BUILTIN_PSUBSB128,
  IX86_BUILTIN_PSUBSW128,
  IX86_BUILTIN_PSUBUSB128,
  IX86_BUILTIN_PSUBUSW128,

  IX86_BUILTIN_PAND128,
  IX86_BUILTIN_PANDN128,
  IX86_BUILTIN_POR128,
  IX86_BUILTIN_PXOR128,

  IX86_BUILTIN_PAVGB128,
  IX86_BUILTIN_PAVGW128,

  IX86_BUILTIN_PCMPEQB128,
  IX86_BUILTIN_PCMPEQW128,
  IX86_BUILTIN_PCMPEQD128,
  IX86_BUILTIN_PCMPGTB128,
  IX86_BUILTIN_PCMPGTW128,
  IX86_BUILTIN_PCMPGTD128,

  IX86_BUILTIN_PMADDWD128,

  IX86_BUILTIN_PMAXSW128,
  IX86_BUILTIN_PMAXUB128,
  IX86_BUILTIN_PMINSW128,
  IX86_BUILTIN_PMINUB128,

  IX86_BUILTIN_PMULUDQ,
  IX86_BUILTIN_PMULUDQ128,
  IX86_BUILTIN_PMULHUW128,
  IX86_BUILTIN_PMULHW128,
  IX86_BUILTIN_PMULLW128,

  IX86_BUILTIN_PSADBW128,
  IX86_BUILTIN_PSHUFHW,
  IX86_BUILTIN_PSHUFLW,
  IX86_BUILTIN_PSHUFD,

  IX86_BUILTIN_PSLLDQI128,
  IX86_BUILTIN_PSLLWI128,
  IX86_BUILTIN_PSLLDI128,
  IX86_BUILTIN_PSLLQI128,
  IX86_BUILTIN_PSRAWI128,
  IX86_BUILTIN_PSRADI128,
  IX86_BUILTIN_PSRLDQI128,
  IX86_BUILTIN_PSRLWI128,
  IX86_BUILTIN_PSRLDI128,
  IX86_BUILTIN_PSRLQI128,

  IX86_BUILTIN_PSLLDQ128,
  IX86_BUILTIN_PSLLW128,
  IX86_BUILTIN_PSLLD128,
  IX86_BUILTIN_PSLLQ128,
  IX86_BUILTIN_PSRAW128,
  IX86_BUILTIN_PSRAD128,
  IX86_BUILTIN_PSRLW128,
  IX86_BUILTIN_PSRLD128,
  IX86_BUILTIN_PSRLQ128,

  IX86_BUILTIN_PUNPCKHBW128,
  IX86_BUILTIN_PUNPCKHWD128,
  IX86_BUILTIN_PUNPCKHDQ128,
  IX86_BUILTIN_PUNPCKHQDQ128,
  IX86_BUILTIN_PUNPCKLBW128,
  IX86_BUILTIN_PUNPCKLWD128,
  IX86_BUILTIN_PUNPCKLDQ128,
  IX86_BUILTIN_PUNPCKLQDQ128,

  IX86_BUILTIN_CLFLUSH,
  IX86_BUILTIN_MFENCE,
  IX86_BUILTIN_LFENCE,
  IX86_BUILTIN_PAUSE,

  IX86_BUILTIN_BSRSI,
  IX86_BUILTIN_BSRDI,
  IX86_BUILTIN_RDPMC,
  IX86_BUILTIN_RDTSC,
  IX86_BUILTIN_RDTSCP,
  IX86_BUILTIN_ROLQI,
  IX86_BUILTIN_ROLHI,
  IX86_BUILTIN_RORQI,
  IX86_BUILTIN_RORHI,

  /* SSE3 */
  IX86_BUILTIN_ADDSUBPS,
  IX86_BUILTIN_HADDPS,
  IX86_BUILTIN_HSUBPS,
  IX86_BUILTIN_MOVSHDUP,
  IX86_BUILTIN_MOVSLDUP,
  IX86_BUILTIN_ADDSUBPD,
  IX86_BUILTIN_HADDPD,
  IX86_BUILTIN_HSUBPD,
  IX86_BUILTIN_LDDQU,

  IX86_BUILTIN_MONITOR,
  IX86_BUILTIN_MWAIT,

  /* SSSE3 */
  IX86_BUILTIN_PHADDW,
  IX86_BUILTIN_PHADDD,
  IX86_BUILTIN_PHADDSW,
  IX86_BUILTIN_PHSUBW,
  IX86_BUILTIN_PHSUBD,
  IX86_BUILTIN_PHSUBSW,
  IX86_BUILTIN_PMADDUBSW,
  IX86_BUILTIN_PMULHRSW,
  IX86_BUILTIN_PSHUFB,
  IX86_BUILTIN_PSIGNB,
  IX86_BUILTIN_PSIGNW,
  IX86_BUILTIN_PSIGND,
  IX86_BUILTIN_PALIGNR,
  IX86_BUILTIN_PABSB,
  IX86_BUILTIN_PABSW,
  IX86_BUILTIN_PABSD,

  IX86_BUILTIN_PHADDW128,
  IX86_BUILTIN_PHADDD128,
  IX86_BUILTIN_PHADDSW128,
  IX86_BUILTIN_PHSUBW128,
  IX86_BUILTIN_PHSUBD128,
  IX86_BUILTIN_PHSUBSW128,
  IX86_BUILTIN_PMADDUBSW128,
  IX86_BUILTIN_PMULHRSW128,
  IX86_BUILTIN_PSHUFB128,
  IX86_BUILTIN_PSIGNB128,
  IX86_BUILTIN_PSIGNW128,
  IX86_BUILTIN_PSIGND128,
  IX86_BUILTIN_PALIGNR128,
  IX86_BUILTIN_PABSB128,
  IX86_BUILTIN_PABSW128,
  IX86_BUILTIN_PABSD128,

  /* AMDFAM10 - SSE4A New Instructions.  */
  IX86_BUILTIN_MOVNTSD,
  IX86_BUILTIN_MOVNTSS,
  IX86_BUILTIN_EXTRQI,
  IX86_BUILTIN_EXTRQ,
  IX86_BUILTIN_INSERTQI,
  IX86_BUILTIN_INSERTQ,

  /* SSE4.1 */
  IX86_BUILTIN_BLENDPD,
  IX86_BUILTIN_BLENDPS,
  IX86_BUILTIN_BLENDVPD,
  IX86_BUILTIN_BLENDVPS,
  IX86_BUILTIN_PBLENDVB128,
  IX86_BUILTIN_PBLENDW128,

  IX86_BUILTIN_INSERTPS128,

  IX86_BUILTIN_MOVNTDQA,
  IX86_BUILTIN_MPSADBW128,
  IX86_BUILTIN_PACKUSDW128,
  IX86_BUILTIN_PCMPEQQ,
  IX86_BUILTIN_PHMINPOSUW128,

  IX86_BUILTIN_PMAXSB128,
  IX86_BUILTIN_PMAXSD128,
  IX86_BUILTIN_PMAXUD128,
  IX86_BUILTIN_PMAXUW128,

  IX86_BUILTIN_PMINSB128,
  IX86_BUILTIN_PMINSD128,
  IX86_BUILTIN_PMINUD128,
  IX86_BUILTIN_PMINUW128,

  IX86_BUILTIN_PMOVSXBW128,
  IX86_BUILTIN_PMOVSXBD128,
  IX86_BUILTIN_PMOVSXBQ128,
  IX86_BUILTIN_PMOVSXWD128,
  IX86_BUILTIN_PMOVSXWQ128,
  IX86_BUILTIN_PMOVSXDQ128,

  IX86_BUILTIN_PMOVZXBW128,
  IX86_BUILTIN_PMOVZXBD128,
  IX86_BUILTIN_PMOVZXBQ128,
  IX86_BUILTIN_PMOVZXWD128,
  IX86_BUILTIN_PMOVZXWQ128,
  IX86_BUILTIN_PMOVZXDQ128,

  IX86_BUILTIN_PMULDQ128,
  IX86_BUILTIN_PMULLD128,

  IX86_BUILTIN_ROUNDSD,
  IX86_BUILTIN_ROUNDSS,

  IX86_BUILTIN_ROUNDPD,
  IX86_BUILTIN_ROUNDPS,

  IX86_BUILTIN_FLOORPD,
  IX86_BUILTIN_CEILPD,
  IX86_BUILTIN_TRUNCPD,
  IX86_BUILTIN_RINTPD,
  IX86_BUILTIN_ROUNDPD_AZ,

  IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX,
  IX86_BUILTIN_CEILPD_VEC_PACK_SFIX,
  IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX,

  IX86_BUILTIN_FLOORPS,
  IX86_BUILTIN_CEILPS,
  IX86_BUILTIN_TRUNCPS,
  IX86_BUILTIN_RINTPS,
  IX86_BUILTIN_ROUNDPS_AZ,

  IX86_BUILTIN_FLOORPS_SFIX,
  IX86_BUILTIN_CEILPS_SFIX,
  IX86_BUILTIN_ROUNDPS_AZ_SFIX,

  IX86_BUILTIN_PTESTZ,
  IX86_BUILTIN_PTESTC,
  IX86_BUILTIN_PTESTNZC,

  IX86_BUILTIN_VEC_INIT_V2SI,
  IX86_BUILTIN_VEC_INIT_V4HI,
  IX86_BUILTIN_VEC_INIT_V8QI,
  IX86_BUILTIN_VEC_EXT_V2DF,
  IX86_BUILTIN_VEC_EXT_V2DI,
  IX86_BUILTIN_VEC_EXT_V4SF,
  IX86_BUILTIN_VEC_EXT_V4SI,
  IX86_BUILTIN_VEC_EXT_V8HI,
  IX86_BUILTIN_VEC_EXT_V2SI,
  IX86_BUILTIN_VEC_EXT_V4HI,
  IX86_BUILTIN_VEC_EXT_V16QI,
  IX86_BUILTIN_VEC_SET_V2DI,
  IX86_BUILTIN_VEC_SET_V4SF,
  IX86_BUILTIN_VEC_SET_V4SI,
  IX86_BUILTIN_VEC_SET_V8HI,
  IX86_BUILTIN_VEC_SET_V4HI,
  IX86_BUILTIN_VEC_SET_V16QI,

  IX86_BUILTIN_VEC_PACK_SFIX,
  IX86_BUILTIN_VEC_PACK_SFIX256,

  /* SSE4.2 */
  IX86_BUILTIN_CRC32QI,
  IX86_BUILTIN_CRC32HI,
  IX86_BUILTIN_CRC32SI,
  IX86_BUILTIN_CRC32DI,

  IX86_BUILTIN_PCMPESTRI128,
  IX86_BUILTIN_PCMPESTRM128,
  IX86_BUILTIN_PCMPESTRA128,
  IX86_BUILTIN_PCMPESTRC128,
  IX86_BUILTIN_PCMPESTRO128,
  IX86_BUILTIN_PCMPESTRS128,
  IX86_BUILTIN_PCMPESTRZ128,
  IX86_BUILTIN_PCMPISTRI128,
  IX86_BUILTIN_PCMPISTRM128,
  IX86_BUILTIN_PCMPISTRA128,
  IX86_BUILTIN_PCMPISTRC128,
  IX86_BUILTIN_PCMPISTRO128,
  IX86_BUILTIN_PCMPISTRS128,
  IX86_BUILTIN_PCMPISTRZ128,

  IX86_BUILTIN_PCMPGTQ,

  /* AES instructions */
  IX86_BUILTIN_AESENC128,
  IX86_BUILTIN_AESENCLAST128,
  IX86_BUILTIN_AESDEC128,
  IX86_BUILTIN_AESDECLAST128,
  IX86_BUILTIN_AESIMC128,
  IX86_BUILTIN_AESKEYGENASSIST128,

  /* PCLMUL instruction */
  IX86_BUILTIN_PCLMULQDQ128,

  /* AVX */
  IX86_BUILTIN_ADDPD256,
  IX86_BUILTIN_ADDPS256,
  IX86_BUILTIN_ADDSUBPD256,
  IX86_BUILTIN_ADDSUBPS256,
  IX86_BUILTIN_ANDPD256,
  IX86_BUILTIN_ANDPS256,
  IX86_BUILTIN_ANDNPD256,
  IX86_BUILTIN_ANDNPS256,
  IX86_BUILTIN_BLENDPD256,
  IX86_BUILTIN_BLENDPS256,
  IX86_BUILTIN_BLENDVPD256,
  IX86_BUILTIN_BLENDVPS256,
  IX86_BUILTIN_DIVPD256,
  IX86_BUILTIN_DIVPS256,
  IX86_BUILTIN_DPPS256,
  IX86_BUILTIN_HADDPD256,
  IX86_BUILTIN_HADDPS256,
  IX86_BUILTIN_HSUBPD256,
  IX86_BUILTIN_HSUBPS256,
  IX86_BUILTIN_MAXPD256,
  IX86_BUILTIN_MAXPS256,
  IX86_BUILTIN_MINPD256,
  IX86_BUILTIN_MINPS256,
  IX86_BUILTIN_MULPD256,
  IX86_BUILTIN_MULPS256,
  IX86_BUILTIN_ORPD256,
  IX86_BUILTIN_ORPS256,
  IX86_BUILTIN_SHUFPD256,
  IX86_BUILTIN_SHUFPS256,
  IX86_BUILTIN_SUBPD256,
  IX86_BUILTIN_SUBPS256,
  IX86_BUILTIN_XORPD256,
  IX86_BUILTIN_XORPS256,
  IX86_BUILTIN_CMPSD,
  IX86_BUILTIN_CMPSS,
  IX86_BUILTIN_CMPPD,
  IX86_BUILTIN_CMPPS,
  IX86_BUILTIN_CMPPD256,
  IX86_BUILTIN_CMPPS256,
  IX86_BUILTIN_CVTDQ2PD256,
  IX86_BUILTIN_CVTDQ2PS256,
  IX86_BUILTIN_CVTPD2PS256,
  IX86_BUILTIN_CVTPS2DQ256,
  IX86_BUILTIN_CVTPS2PD256,
  IX86_BUILTIN_CVTTPD2DQ256,
  IX86_BUILTIN_CVTPD2DQ256,
  IX86_BUILTIN_CVTTPS2DQ256,
  IX86_BUILTIN_EXTRACTF128PD256,
  IX86_BUILTIN_EXTRACTF128PS256,
  IX86_BUILTIN_EXTRACTF128SI256,
  IX86_BUILTIN_VZEROALL,
  IX86_BUILTIN_VZEROUPPER,
  IX86_BUILTIN_VPERMILVARPD,
  IX86_BUILTIN_VPERMILVARPS,
  IX86_BUILTIN_VPERMILVARPD256,
  IX86_BUILTIN_VPERMILVARPS256,
  IX86_BUILTIN_VPERMILPD,
  IX86_BUILTIN_VPERMILPS,
  IX86_BUILTIN_VPERMILPD256,
  IX86_BUILTIN_VPERMILPS256,
  IX86_BUILTIN_VPERMIL2PD,
  IX86_BUILTIN_VPERMIL2PS,
  IX86_BUILTIN_VPERMIL2PD256,
  IX86_BUILTIN_VPERMIL2PS256,
  IX86_BUILTIN_VPERM2F128PD256,
  IX86_BUILTIN_VPERM2F128PS256,
  IX86_BUILTIN_VPERM2F128SI256,
  IX86_BUILTIN_VBROADCASTSS,
  IX86_BUILTIN_VBROADCASTSD256,
  IX86_BUILTIN_VBROADCASTSS256,
  IX86_BUILTIN_VBROADCASTPD256,
  IX86_BUILTIN_VBROADCASTPS256,
  IX86_BUILTIN_VINSERTF128PD256,
  IX86_BUILTIN_VINSERTF128PS256,
  IX86_BUILTIN_VINSERTF128SI256,
  IX86_BUILTIN_LOADUPD256,
  IX86_BUILTIN_LOADUPS256,
  IX86_BUILTIN_STOREUPD256,
  IX86_BUILTIN_STOREUPS256,
  IX86_BUILTIN_LDDQU256,
  IX86_BUILTIN_MOVNTDQ256,
  IX86_BUILTIN_MOVNTPD256,
  IX86_BUILTIN_MOVNTPS256,
  IX86_BUILTIN_LOADDQU256,
  IX86_BUILTIN_STOREDQU256,
  IX86_BUILTIN_MASKLOADPD,
  IX86_BUILTIN_MASKLOADPS,
  IX86_BUILTIN_MASKSTOREPD,
  IX86_BUILTIN_MASKSTOREPS,
  IX86_BUILTIN_MASKLOADPD256,
  IX86_BUILTIN_MASKLOADPS256,
  IX86_BUILTIN_MASKSTOREPD256,
  IX86_BUILTIN_MASKSTOREPS256,
  IX86_BUILTIN_MOVSHDUP256,
  IX86_BUILTIN_MOVSLDUP256,
  IX86_BUILTIN_MOVDDUP256,

  IX86_BUILTIN_SQRTPD256,
  IX86_BUILTIN_SQRTPS256,
  IX86_BUILTIN_SQRTPS_NR256,
  IX86_BUILTIN_RSQRTPS256,
  IX86_BUILTIN_RSQRTPS_NR256,

  IX86_BUILTIN_RCPPS256,

  IX86_BUILTIN_ROUNDPD256,
  IX86_BUILTIN_ROUNDPS256,

  IX86_BUILTIN_FLOORPD256,
  IX86_BUILTIN_CEILPD256,
  IX86_BUILTIN_TRUNCPD256,
  IX86_BUILTIN_RINTPD256,
  IX86_BUILTIN_ROUNDPD_AZ256,

  IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256,
  IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256,
  IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256,

  IX86_BUILTIN_FLOORPS256,
  IX86_BUILTIN_CEILPS256,
  IX86_BUILTIN_TRUNCPS256,
  IX86_BUILTIN_RINTPS256,
  IX86_BUILTIN_ROUNDPS_AZ256,

  IX86_BUILTIN_FLOORPS_SFIX256,
  IX86_BUILTIN_CEILPS_SFIX256,
  IX86_BUILTIN_ROUNDPS_AZ_SFIX256,

  IX86_BUILTIN_UNPCKHPD256,
  IX86_BUILTIN_UNPCKLPD256,
  IX86_BUILTIN_UNPCKHPS256,
  IX86_BUILTIN_UNPCKLPS256,

  IX86_BUILTIN_SI256_SI,
  IX86_BUILTIN_PS256_PS,
  IX86_BUILTIN_PD256_PD,
  IX86_BUILTIN_SI_SI256,
  IX86_BUILTIN_PS_PS256,
  IX86_BUILTIN_PD_PD256,

  IX86_BUILTIN_VTESTZPD,
  IX86_BUILTIN_VTESTCPD,
  IX86_BUILTIN_VTESTNZCPD,
  IX86_BUILTIN_VTESTZPS,
  IX86_BUILTIN_VTESTCPS,
  IX86_BUILTIN_VTESTNZCPS,
  IX86_BUILTIN_VTESTZPD256,
  IX86_BUILTIN_VTESTCPD256,
  IX86_BUILTIN_VTESTNZCPD256,
  IX86_BUILTIN_VTESTZPS256,
  IX86_BUILTIN_VTESTCPS256,
  IX86_BUILTIN_VTESTNZCPS256,
  IX86_BUILTIN_PTESTZ256,
  IX86_BUILTIN_PTESTC256,
  IX86_BUILTIN_PTESTNZC256,

  IX86_BUILTIN_MOVMSKPD256,
  IX86_BUILTIN_MOVMSKPS256,

  /* AVX2 */
  IX86_BUILTIN_MPSADBW256,
  IX86_BUILTIN_PABSB256,
  IX86_BUILTIN_PABSW256,
  IX86_BUILTIN_PABSD256,
  IX86_BUILTIN_PACKSSDW256,
  IX86_BUILTIN_PACKSSWB256,
  IX86_BUILTIN_PACKUSDW256,
  IX86_BUILTIN_PACKUSWB256,
  IX86_BUILTIN_PADDB256,
  IX86_BUILTIN_PADDW256,
  IX86_BUILTIN_PADDD256,
  IX86_BUILTIN_PADDQ256,
  IX86_BUILTIN_PADDSB256,
  IX86_BUILTIN_PADDSW256,
  IX86_BUILTIN_PADDUSB256,
  IX86_BUILTIN_PADDUSW256,
  IX86_BUILTIN_PALIGNR256,
  IX86_BUILTIN_AND256I,
  IX86_BUILTIN_ANDNOT256I,
  IX86_BUILTIN_PAVGB256,
  IX86_BUILTIN_PAVGW256,
  IX86_BUILTIN_PBLENDVB256,
  IX86_BUILTIN_PBLENDVW256,
  IX86_BUILTIN_PCMPEQB256,
  IX86_BUILTIN_PCMPEQW256,
  IX86_BUILTIN_PCMPEQD256,
  IX86_BUILTIN_PCMPEQQ256,
  IX86_BUILTIN_PCMPGTB256,
  IX86_BUILTIN_PCMPGTW256,
  IX86_BUILTIN_PCMPGTD256,
  IX86_BUILTIN_PCMPGTQ256,
  IX86_BUILTIN_PHADDW256,
  IX86_BUILTIN_PHADDD256,
  IX86_BUILTIN_PHADDSW256,
  IX86_BUILTIN_PHSUBW256,
  IX86_BUILTIN_PHSUBD256,
  IX86_BUILTIN_PHSUBSW256,
  IX86_BUILTIN_PMADDUBSW256,
  IX86_BUILTIN_PMADDWD256,
  IX86_BUILTIN_PMAXSB256,
  IX86_BUILTIN_PMAXSW256,
  IX86_BUILTIN_PMAXSD256,
  IX86_BUILTIN_PMAXUB256,
  IX86_BUILTIN_PMAXUW256,
  IX86_BUILTIN_PMAXUD256,
  IX86_BUILTIN_PMINSB256,
  IX86_BUILTIN_PMINSW256,
  IX86_BUILTIN_PMINSD256,
  IX86_BUILTIN_PMINUB256,
  IX86_BUILTIN_PMINUW256,
  IX86_BUILTIN_PMINUD256,
  IX86_BUILTIN_PMOVMSKB256,
  IX86_BUILTIN_PMOVSXBW256,
  IX86_BUILTIN_PMOVSXBD256,
  IX86_BUILTIN_PMOVSXBQ256,
  IX86_BUILTIN_PMOVSXWD256,
  IX86_BUILTIN_PMOVSXWQ256,
  IX86_BUILTIN_PMOVSXDQ256,
  IX86_BUILTIN_PMOVZXBW256,
  IX86_BUILTIN_PMOVZXBD256,
  IX86_BUILTIN_PMOVZXBQ256,
  IX86_BUILTIN_PMOVZXWD256,
  IX86_BUILTIN_PMOVZXWQ256,
  IX86_BUILTIN_PMOVZXDQ256,
  IX86_BUILTIN_PMULDQ256,
  IX86_BUILTIN_PMULHRSW256,
  IX86_BUILTIN_PMULHUW256,
  IX86_BUILTIN_PMULHW256,
  IX86_BUILTIN_PMULLW256,
  IX86_BUILTIN_PMULLD256,
  IX86_BUILTIN_PMULUDQ256,
  IX86_BUILTIN_POR256,
  IX86_BUILTIN_PSADBW256,
  IX86_BUILTIN_PSHUFB256,
  IX86_BUILTIN_PSHUFD256,
  IX86_BUILTIN_PSHUFHW256,
  IX86_BUILTIN_PSHUFLW256,
  IX86_BUILTIN_PSIGNB256,
  IX86_BUILTIN_PSIGNW256,
  IX86_BUILTIN_PSIGND256,
  IX86_BUILTIN_PSLLDQI256,
  IX86_BUILTIN_PSLLWI256,
  IX86_BUILTIN_PSLLW256,
  IX86_BUILTIN_PSLLDI256,
  IX86_BUILTIN_PSLLD256,
  IX86_BUILTIN_PSLLQI256,
  IX86_BUILTIN_PSLLQ256,
  IX86_BUILTIN_PSRAWI256,
  IX86_BUILTIN_PSRAW256,
  IX86_BUILTIN_PSRADI256,
  IX86_BUILTIN_PSRAD256,
  IX86_BUILTIN_PSRLDQI256,
  IX86_BUILTIN_PSRLWI256,
  IX86_BUILTIN_PSRLW256,
  IX86_BUILTIN_PSRLDI256,
  IX86_BUILTIN_PSRLD256,
  IX86_BUILTIN_PSRLQI256,
  IX86_BUILTIN_PSRLQ256,
  IX86_BUILTIN_PSUBB256,
  IX86_BUILTIN_PSUBW256,
  IX86_BUILTIN_PSUBD256,
  IX86_BUILTIN_PSUBQ256,
  IX86_BUILTIN_PSUBSB256,
  IX86_BUILTIN_PSUBSW256,
  IX86_BUILTIN_PSUBUSB256,
  IX86_BUILTIN_PSUBUSW256,
  IX86_BUILTIN_PUNPCKHBW256,
  IX86_BUILTIN_PUNPCKHWD256,
  IX86_BUILTIN_PUNPCKHDQ256,
  IX86_BUILTIN_PUNPCKHQDQ256,
  IX86_BUILTIN_PUNPCKLBW256,
  IX86_BUILTIN_PUNPCKLWD256,
  IX86_BUILTIN_PUNPCKLDQ256,
  IX86_BUILTIN_PUNPCKLQDQ256,
  IX86_BUILTIN_PXOR256,
  IX86_BUILTIN_MOVNTDQA256,
  IX86_BUILTIN_VBROADCASTSS_PS,
  IX86_BUILTIN_VBROADCASTSS_PS256,
  IX86_BUILTIN_VBROADCASTSD_PD256,
  IX86_BUILTIN_VBROADCASTSI256,
  IX86_BUILTIN_PBLENDD256,
  IX86_BUILTIN_PBLENDD128,
  IX86_BUILTIN_PBROADCASTB256,
  IX86_BUILTIN_PBROADCASTW256,
  IX86_BUILTIN_PBROADCASTD256,
  IX86_BUILTIN_PBROADCASTQ256,
  IX86_BUILTIN_PBROADCASTB128,
  IX86_BUILTIN_PBROADCASTW128,
  IX86_BUILTIN_PBROADCASTD128,
  IX86_BUILTIN_PBROADCASTQ128,
  IX86_BUILTIN_VPERMVARSI256,
  IX86_BUILTIN_VPERMDF256,
  IX86_BUILTIN_VPERMVARSF256,
  IX86_BUILTIN_VPERMDI256,
  IX86_BUILTIN_VPERMTI256,
  IX86_BUILTIN_VEXTRACT128I256,
  IX86_BUILTIN_VINSERT128I256,
  IX86_BUILTIN_MASKLOADD,
  IX86_BUILTIN_MASKLOADQ,
  IX86_BUILTIN_MASKLOADD256,
  IX86_BUILTIN_MASKLOADQ256,
  IX86_BUILTIN_MASKSTORED,
  IX86_BUILTIN_MASKSTOREQ,
  IX86_BUILTIN_MASKSTORED256,
  IX86_BUILTIN_MASKSTOREQ256,
  IX86_BUILTIN_PSLLVV4DI,
  IX86_BUILTIN_PSLLVV2DI,
  IX86_BUILTIN_PSLLVV8SI,
  IX86_BUILTIN_PSLLVV4SI,
  IX86_BUILTIN_PSRAVV8SI,
  IX86_BUILTIN_PSRAVV4SI,
  IX86_BUILTIN_PSRLVV4DI,
  IX86_BUILTIN_PSRLVV2DI,
  IX86_BUILTIN_PSRLVV8SI,
  IX86_BUILTIN_PSRLVV4SI,

  IX86_BUILTIN_GATHERSIV2DF,
  IX86_BUILTIN_GATHERSIV4DF,
  IX86_BUILTIN_GATHERDIV2DF,
  IX86_BUILTIN_GATHERDIV4DF,
  IX86_BUILTIN_GATHERSIV4SF,
  IX86_BUILTIN_GATHERSIV8SF,
  IX86_BUILTIN_GATHERDIV4SF,
  IX86_BUILTIN_GATHERDIV8SF,
  IX86_BUILTIN_GATHERSIV2DI,
  IX86_BUILTIN_GATHERSIV4DI,
  IX86_BUILTIN_GATHERDIV2DI,
  IX86_BUILTIN_GATHERDIV4DI,
  IX86_BUILTIN_GATHERSIV4SI,
  IX86_BUILTIN_GATHERSIV8SI,
  IX86_BUILTIN_GATHERDIV4SI,
  IX86_BUILTIN_GATHERDIV8SI,

  /* Alternate 4 element gather for the vectorizer where
     all operands are 32-byte wide.  */
  IX86_BUILTIN_GATHERALTSIV4DF,
  IX86_BUILTIN_GATHERALTDIV8SF,
  IX86_BUILTIN_GATHERALTSIV4DI,
  IX86_BUILTIN_GATHERALTDIV8SI,

  /* TFmode support builtins.  */
  IX86_BUILTIN_INFQ,
  IX86_BUILTIN_HUGE_VALQ,
  IX86_BUILTIN_FABSQ,
  IX86_BUILTIN_COPYSIGNQ,

  /* Vectorizer support builtins.  */
  IX86_BUILTIN_CPYSGNPS,
  IX86_BUILTIN_CPYSGNPD,
  IX86_BUILTIN_CPYSGNPS256,
  IX86_BUILTIN_CPYSGNPD256,

  /* FMA4 instructions.  */
  IX86_BUILTIN_VFMADDSS,
  IX86_BUILTIN_VFMADDSD,
  IX86_BUILTIN_VFMADDPS,
  IX86_BUILTIN_VFMADDPD,
  IX86_BUILTIN_VFMADDPS256,
  IX86_BUILTIN_VFMADDPD256,
  IX86_BUILTIN_VFMADDSUBPS,
  IX86_BUILTIN_VFMADDSUBPD,
  IX86_BUILTIN_VFMADDSUBPS256,
  IX86_BUILTIN_VFMADDSUBPD256,

  /* FMA3 instructions.  */
  IX86_BUILTIN_VFMADDSS3,
  IX86_BUILTIN_VFMADDSD3,

  /* XOP instructions.  */
  IX86_BUILTIN_VPCMOV,
  IX86_BUILTIN_VPCMOV_V2DI,
  IX86_BUILTIN_VPCMOV_V4SI,
  IX86_BUILTIN_VPCMOV_V8HI,
  IX86_BUILTIN_VPCMOV_V16QI,
  IX86_BUILTIN_VPCMOV_V4SF,
  IX86_BUILTIN_VPCMOV_V2DF,
  IX86_BUILTIN_VPCMOV256,
  IX86_BUILTIN_VPCMOV_V4DI256,
  IX86_BUILTIN_VPCMOV_V8SI256,
  IX86_BUILTIN_VPCMOV_V16HI256,
  IX86_BUILTIN_VPCMOV_V32QI256,
  IX86_BUILTIN_VPCMOV_V8SF256,
  IX86_BUILTIN_VPCMOV_V4DF256,

  IX86_BUILTIN_VPPERM,

  IX86_BUILTIN_VPMACSSWW,
  IX86_BUILTIN_VPMACSWW,
  IX86_BUILTIN_VPMACSSWD,
  IX86_BUILTIN_VPMACSWD,
  IX86_BUILTIN_VPMACSSDD,
  IX86_BUILTIN_VPMACSDD,
  IX86_BUILTIN_VPMACSSDQL,
  IX86_BUILTIN_VPMACSSDQH,
  IX86_BUILTIN_VPMACSDQL,
  IX86_BUILTIN_VPMACSDQH,
  IX86_BUILTIN_VPMADCSSWD,
  IX86_BUILTIN_VPMADCSWD,

  IX86_BUILTIN_VPHADDBW,
  IX86_BUILTIN_VPHADDBD,
  IX86_BUILTIN_VPHADDBQ,
  IX86_BUILTIN_VPHADDWD,
  IX86_BUILTIN_VPHADDWQ,
  IX86_BUILTIN_VPHADDDQ,
  IX86_BUILTIN_VPHADDUBW,
  IX86_BUILTIN_VPHADDUBD,
  IX86_BUILTIN_VPHADDUBQ,
  IX86_BUILTIN_VPHADDUWD,
  IX86_BUILTIN_VPHADDUWQ,
  IX86_BUILTIN_VPHADDUDQ,
  IX86_BUILTIN_VPHSUBBW,
  IX86_BUILTIN_VPHSUBWD,
  IX86_BUILTIN_VPHSUBDQ,

  IX86_BUILTIN_VPROTB,
  IX86_BUILTIN_VPROTW,
  IX86_BUILTIN_VPROTD,
  IX86_BUILTIN_VPROTQ,
  IX86_BUILTIN_VPROTB_IMM,
  IX86_BUILTIN_VPROTW_IMM,
  IX86_BUILTIN_VPROTD_IMM,
  IX86_BUILTIN_VPROTQ_IMM,

  IX86_BUILTIN_VPSHLB,
  IX86_BUILTIN_VPSHLW,
  IX86_BUILTIN_VPSHLD,
  IX86_BUILTIN_VPSHLQ,
  IX86_BUILTIN_VPSHAB,
  IX86_BUILTIN_VPSHAW,
  IX86_BUILTIN_VPSHAD,
  IX86_BUILTIN_VPSHAQ,

  IX86_BUILTIN_VFRCZSS,
  IX86_BUILTIN_VFRCZSD,
  IX86_BUILTIN_VFRCZPS,
  IX86_BUILTIN_VFRCZPD,
  IX86_BUILTIN_VFRCZPS256,
  IX86_BUILTIN_VFRCZPD256,

  IX86_BUILTIN_VPCOMEQUB,
  IX86_BUILTIN_VPCOMNEUB,
  IX86_BUILTIN_VPCOMLTUB,
  IX86_BUILTIN_VPCOMLEUB,
  IX86_BUILTIN_VPCOMGTUB,
  IX86_BUILTIN_VPCOMGEUB,
  IX86_BUILTIN_VPCOMFALSEUB,
  IX86_BUILTIN_VPCOMTRUEUB,

  IX86_BUILTIN_VPCOMEQUW,
  IX86_BUILTIN_VPCOMNEUW,
  IX86_BUILTIN_VPCOMLTUW,
  IX86_BUILTIN_VPCOMLEUW,
  IX86_BUILTIN_VPCOMGTUW,
  IX86_BUILTIN_VPCOMGEUW,
  IX86_BUILTIN_VPCOMFALSEUW,
  IX86_BUILTIN_VPCOMTRUEUW,

  IX86_BUILTIN_VPCOMEQUD,
  IX86_BUILTIN_VPCOMNEUD,
  IX86_BUILTIN_VPCOMLTUD,
  IX86_BUILTIN_VPCOMLEUD,
  IX86_BUILTIN_VPCOMGTUD,
  IX86_BUILTIN_VPCOMGEUD,
  IX86_BUILTIN_VPCOMFALSEUD,
  IX86_BUILTIN_VPCOMTRUEUD,

  IX86_BUILTIN_VPCOMEQUQ,
  IX86_BUILTIN_VPCOMNEUQ,
  IX86_BUILTIN_VPCOMLTUQ,
  IX86_BUILTIN_VPCOMLEUQ,
  IX86_BUILTIN_VPCOMGTUQ,
  IX86_BUILTIN_VPCOMGEUQ,
  IX86_BUILTIN_VPCOMFALSEUQ,
  IX86_BUILTIN_VPCOMTRUEUQ,

  IX86_BUILTIN_VPCOMEQB,
  IX86_BUILTIN_VPCOMNEB,
  IX86_BUILTIN_VPCOMLTB,
  IX86_BUILTIN_VPCOMLEB,
  IX86_BUILTIN_VPCOMGTB,
  IX86_BUILTIN_VPCOMGEB,
  IX86_BUILTIN_VPCOMFALSEB,
  IX86_BUILTIN_VPCOMTRUEB,

  IX86_BUILTIN_VPCOMEQW,
  IX86_BUILTIN_VPCOMNEW,
  IX86_BUILTIN_VPCOMLTW,
  IX86_BUILTIN_VPCOMLEW,
  IX86_BUILTIN_VPCOMGTW,
  IX86_BUILTIN_VPCOMGEW,
  IX86_BUILTIN_VPCOMFALSEW,
  IX86_BUILTIN_VPCOMTRUEW,

  IX86_BUILTIN_VPCOMEQD,
  IX86_BUILTIN_VPCOMNED,
  IX86_BUILTIN_VPCOMLTD,
  IX86_BUILTIN_VPCOMLED,
  IX86_BUILTIN_VPCOMGTD,
  IX86_BUILTIN_VPCOMGED,
  IX86_BUILTIN_VPCOMFALSED,
  IX86_BUILTIN_VPCOMTRUED,

  IX86_BUILTIN_VPCOMEQQ,
  IX86_BUILTIN_VPCOMNEQ,
  IX86_BUILTIN_VPCOMLTQ,
  IX86_BUILTIN_VPCOMLEQ,
  IX86_BUILTIN_VPCOMGTQ,
  IX86_BUILTIN_VPCOMGEQ,
  IX86_BUILTIN_VPCOMFALSEQ,
  IX86_BUILTIN_VPCOMTRUEQ,

  /* LWP instructions.  */
  IX86_BUILTIN_LLWPCB,
  IX86_BUILTIN_SLWPCB,
  IX86_BUILTIN_LWPVAL32,
  IX86_BUILTIN_LWPVAL64,
  IX86_BUILTIN_LWPINS32,
  IX86_BUILTIN_LWPINS64,

  /* RTM instructions.  */
  IX86_BUILTIN_XBEGIN,
  IX86_BUILTIN_XEND,
  IX86_BUILTIN_XABORT,
  IX86_BUILTIN_XTEST,

  /* BMI instructions.  */
  IX86_BUILTIN_BEXTR32,
  IX86_BUILTIN_BEXTR64,

  /* TBM instructions.  */
  IX86_BUILTIN_BEXTRI32,
  IX86_BUILTIN_BEXTRI64,

  /* BMI2 instructions.  */
  IX86_BUILTIN_BZHI32,
  IX86_BUILTIN_BZHI64,
  IX86_BUILTIN_PDEP32,
  IX86_BUILTIN_PDEP64,
  IX86_BUILTIN_PEXT32,
  IX86_BUILTIN_PEXT64,

  /* ADX instructions.  */
  IX86_BUILTIN_ADDCARRYX32,
  IX86_BUILTIN_ADDCARRYX64,

  /* FSGSBASE instructions.  */
  IX86_BUILTIN_RDFSBASE32,
  IX86_BUILTIN_RDFSBASE64,
  IX86_BUILTIN_RDGSBASE32,
  IX86_BUILTIN_RDGSBASE64,
  IX86_BUILTIN_WRFSBASE32,
  IX86_BUILTIN_WRFSBASE64,
  IX86_BUILTIN_WRGSBASE32,
  IX86_BUILTIN_WRGSBASE64,

  /* RDRND instructions.  */
  IX86_BUILTIN_RDRAND16_STEP,
  IX86_BUILTIN_RDRAND32_STEP,
  IX86_BUILTIN_RDRAND64_STEP,

  /* RDSEED instructions.  */
  IX86_BUILTIN_RDSEED16_STEP,
  IX86_BUILTIN_RDSEED32_STEP,
  IX86_BUILTIN_RDSEED64_STEP,

  /* F16C instructions.  */
  IX86_BUILTIN_CVTPH2PS,
  IX86_BUILTIN_CVTPH2PS256,
  IX86_BUILTIN_CVTPS2PH,
  IX86_BUILTIN_CVTPS2PH256,

  /* CFString built-in for darwin */
  IX86_BUILTIN_CFSTRING,

  /* Builtins to get CPU type and supported features.  */
  IX86_BUILTIN_CPU_INIT,
  IX86_BUILTIN_CPU_IS,
  IX86_BUILTIN_CPU_SUPPORTS,

  IX86_BUILTIN_MAX
};

/* Table for the ix86 builtin decls.  */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];

/* Table of all of the builtin functions that are possible with different ISAs
   but are waiting to be built until a function is declared to use that
   ISA.  */
struct builtin_isa
{
  const char *name;		/* function name */
  enum ix86_builtin_func_type tcode; /* type to use in the declaration */
  HOST_WIDE_INT isa;		/* isa_flags this builtin is defined for */
  bool const_p;			/* true if the declaration is constant */
  bool set_and_not_built_p;	/* true if the builtin was deferred and its
				   decl has not been built yet */
};

static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];

/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the
   MASK of which isa_flags to use in the ix86_builtins_isa array.  Stores the
   function decl in the ix86_builtins array.  Returns the function decl or
   NULL_TREE, if the builtin was not added.

   If the front end has a special hook for builtin functions, delay adding
   builtin functions that aren't in the current ISA until the ISA is changed
   with function specific optimization.  Doing so can save about 300K for the
   default compiler.  When the builtin is expanded, check at that time whether
   it is valid.

   If the front end doesn't have a special hook, record all builtins, even if
   it isn't an instruction set in the current ISA in case the user uses
   function specific options for a different ISA, so that we don't get scope
   errors if a builtin is added in the middle of a function scope.  */

static tree
def_builtin (HOST_WIDE_INT mask, const char *name,
	     enum ix86_builtin_func_type tcode,
	     enum ix86_builtins code)
{
  tree decl = NULL_TREE;

  if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
    {
      ix86_builtins_isa[(int) code].isa = mask;

      mask &= ~OPTION_MASK_ISA_64BIT;
      if (mask == 0
	  || (mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type = ix86_get_builtin_func_type (tcode);
	  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
				       NULL, NULL_TREE);
	  ix86_builtins[(int) code] = decl;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = false;
	}
      else
	{
	  /* Defer building the decl; just record what we will need.  */
	  ix86_builtins[(int) code] = NULL_TREE;
	  ix86_builtins_isa[(int) code].tcode = tcode;
	  ix86_builtins_isa[(int) code].name = name;
	  ix86_builtins_isa[(int) code].const_p = false;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = true;
	}
    }

  return decl;
}

/* Like def_builtin, but also marks the function decl "const".  */

static tree
def_builtin_const (HOST_WIDE_INT mask, const char *name,
		   enum ix86_builtin_func_type tcode,
		   enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, tcode, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  else
    ix86_builtins_isa[(int) code].const_p = true;

  return decl;
}
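
/* Illustrative sketch only (the call shape is assumed; the actual
   registration calls live elsewhere in this file): a builtin such as
   IX86_BUILTIN_ADDPS would be declared through the helpers above roughly as

     def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
			V4SF_FTYPE_V4SF_V4SF, IX86_BUILTIN_ADDPS);

   If the requested ISA is not in ix86_isa_flags and the front end has a
   distinct ext-scope hook, the call only records the request in
   ix86_builtins_isa; the decl is built later by ix86_add_new_builtins.  */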

/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even if we didn't use them.  */

static void
ix86_add_new_builtins (HOST_WIDE_INT isa)
{
  int i;

  for (i = 0; i < (int) IX86_BUILTIN_MAX; i++)
    {
      if ((ix86_builtins_isa[i].isa & isa) != 0
	  && ix86_builtins_isa[i].set_and_not_built_p)
	{
	  tree decl, type;

	  /* Don't define the builtin again.  */
	  ix86_builtins_isa[i].set_and_not_built_p = false;

	  type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
	  decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
						 type, i, BUILT_IN_MD, NULL,
						 NULL_TREE);

	  ix86_builtins[i] = decl;
	  if (ix86_builtins_isa[i].const_p)
	    TREE_READONLY (decl) = 1;
	}
    }
}
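
/* Sketch of the intended use (the exact call site is assumed, not shown in
   this excerpt): when function-specific option handling enlarges the ISA
   set, e.g. for a target("...") attribute, it calls roughly

     ix86_add_new_builtins (ix86_isa_flags);

   so that every builtin recorded as set_and_not_built_p above gets its decl
   created in the enclosing scope.  */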

/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS 1

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const int flag;
};

static const struct builtin_description bdesc_comi[] =
{
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
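
/* For reference, a minimal sketch of how one bdesc_comi entry surfaces to
   user code (illustrative usage only, assuming the v4sf typedef):

     typedef float v4sf __attribute__ ((vector_size (16)));
     int eq (v4sf a, v4sf b) { return __builtin_ia32_comieq (a, b); }

   The call expands through CODE_FOR_sse_comi using the UNEQ comparison
   recorded in the table entry.  */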

static const struct builtin_description bdesc_pcmpestr[] =
{
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
};

static const struct builtin_description bdesc_pcmpistr[] =
{
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
};

/* Special builtins with variable number of arguments.  */
static const struct builtin_description bdesc_special_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_pause, "__builtin_ia32_pause", IX86_BUILTIN_PAUSE, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* FXSR, XSAVE and XSAVEOPT */
  { OPTION_MASK_ISA_FXSR, CODE_FOR_nothing, "__builtin_ia32_fxsave", IX86_BUILTIN_FXSAVE, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_FXSR, CODE_FOR_nothing, "__builtin_ia32_fxrstor", IX86_BUILTIN_FXRSTOR, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_XSAVE, CODE_FOR_nothing, "__builtin_ia32_xsave", IX86_BUILTIN_XSAVE, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVE, CODE_FOR_nothing, "__builtin_ia32_xrstor", IX86_BUILTIN_XRSTOR, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVEOPT, CODE_FOR_nothing, "__builtin_ia32_xsaveopt", IX86_BUILTIN_XSAVEOPT, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },

  { OPTION_MASK_ISA_FXSR | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_fxsave64", IX86_BUILTIN_FXSAVE64, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_FXSR | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_fxrstor64", IX86_BUILTIN_FXRSTOR64, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_XSAVE | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_xsave64", IX86_BUILTIN_XSAVE64, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVE | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_xrstor64", IX86_BUILTIN_XRSTOR64, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },
  { OPTION_MASK_ISA_XSAVEOPT | OPTION_MASK_ISA_64BIT, CODE_FOR_nothing, "__builtin_ia32_xsaveopt64", IX86_BUILTIN_XSAVEOPT64, UNKNOWN, (int) VOID_FTYPE_PVOID_INT64 },

  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storeups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },

  /* SSE or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntq, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_storeupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_storedqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntisi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_movntidi, "__builtin_ia32_movnti64", IX86_BUILTIN_MOVNTI64, UNKNOWN, (int) VOID_FTYPE_PLONGLONG_LONGLONG },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loaddqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_loadupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_loadups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_storeupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_storeups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_loaddqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_storedqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },

  /* AVX2 */
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_movntdqa, "__builtin_ia32_movntdqa256", IX86_BUILTIN_MOVNTDQA256, UNKNOWN, (int) V4DI_FTYPE_PV4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadd, "__builtin_ia32_maskloadd", IX86_BUILTIN_MASKLOADD, UNKNOWN, (int) V4SI_FTYPE_PCV4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadq, "__builtin_ia32_maskloadq", IX86_BUILTIN_MASKLOADQ, UNKNOWN, (int) V2DI_FTYPE_PCV2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadd256, "__builtin_ia32_maskloadd256", IX86_BUILTIN_MASKLOADD256, UNKNOWN, (int) V8SI_FTYPE_PCV8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadq256, "__builtin_ia32_maskloadq256", IX86_BUILTIN_MASKLOADQ256, UNKNOWN, (int) V4DI_FTYPE_PCV4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstored, "__builtin_ia32_maskstored", IX86_BUILTIN_MASKSTORED, UNKNOWN, (int) VOID_FTYPE_PV4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstoreq, "__builtin_ia32_maskstoreq", IX86_BUILTIN_MASKSTOREQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstored256, "__builtin_ia32_maskstored256", IX86_BUILTIN_MASKSTORED256, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstoreq256, "__builtin_ia32_maskstoreq256", IX86_BUILTIN_MASKSTOREQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI_V4DI },

  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },

  /* FSGSBASE */
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },

  /* RTM */
  { OPTION_MASK_ISA_RTM, CODE_FOR_xbegin, "__builtin_ia32_xbegin", IX86_BUILTIN_XBEGIN, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_RTM, CODE_FOR_xend, "__builtin_ia32_xend", IX86_BUILTIN_XEND, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_RTM, CODE_FOR_xtest, "__builtin_ia32_xtest", IX86_BUILTIN_XTEST, UNKNOWN, (int) INT_FTYPE_VOID },
};
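
/* For reference, a minimal sketch of one special-args builtin in use
   (illustrative usage only, assuming the v4sf typedef):

     typedef float v4sf __attribute__ ((vector_size (16)));
     void store (float *p, v4sf x) { __builtin_ia32_storeups (p, x); }

   The expansion uses CODE_FOR_sse_storeups with the VOID_FTYPE_PFLOAT_V4SF
   signature recorded in the table.  */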
26955 /* Builtins with variable number of arguments. */
26956 static const struct builtin_description bdesc_args
[] =
26958 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_bsr
, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI
, UNKNOWN
, (int) INT_FTYPE_INT
},
26959 { OPTION_MASK_ISA_64BIT
, CODE_FOR_bsr_rex64
, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI
, UNKNOWN
, (int) INT64_FTYPE_INT64
},
26960 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_nothing
, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC
, UNKNOWN
, (int) UINT64_FTYPE_INT
},
26961 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotlqi3
, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI
, UNKNOWN
, (int) UINT8_FTYPE_UINT8_INT
},
26962 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotlhi3
, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI
, UNKNOWN
, (int) UINT16_FTYPE_UINT16_INT
},
26963 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotrqi3
, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI
, UNKNOWN
, (int) UINT8_FTYPE_UINT8_INT
},
26964 { ~OPTION_MASK_ISA_64BIT
, CODE_FOR_rotrhi3
, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI
, UNKNOWN
, (int) UINT16_FTYPE_UINT16_INT
},
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
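
  /* 3DNow! */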
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
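
  /* 3DNow!A */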
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
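
  /* SSE */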
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  /* SSE MMX or 3Dnow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
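
  /* SSE2 */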
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_floatv4siv4sf2, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_fix_truncv4sfv4si2, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_widen_umult_even_v4si, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
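
  /* SSE2 MMX */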
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
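
  /* SSE3 */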
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
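
  /* SSSE3 */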
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
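
  /* SSE4.1 */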
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
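
  /* SSE4.1 rounding and ptest */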
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_floorpd", IX86_BUILTIN_FLOORPD, (enum rtx_code) ROUND_FLOOR, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_ceilpd", IX86_BUILTIN_CEILPD, (enum rtx_code) ROUND_CEIL, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_truncpd", IX86_BUILTIN_TRUNCPD, (enum rtx_code) ROUND_TRUNC, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_rintpd", IX86_BUILTIN_RINTPD, (enum rtx_code) ROUND_MXCSR, (int) V2DF_FTYPE_V2DF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd_vec_pack_sfix, "__builtin_ia32_floorpd_vec_pack_sfix", IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX, (enum rtx_code) ROUND_FLOOR, (int) V4SI_FTYPE_V2DF_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd_vec_pack_sfix, "__builtin_ia32_ceilpd_vec_pack_sfix", IX86_BUILTIN_CEILPD_VEC_PACK_SFIX, (enum rtx_code) ROUND_CEIL, (int) V4SI_FTYPE_V2DF_V2DF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv2df2, "__builtin_ia32_roundpd_az", IX86_BUILTIN_ROUNDPD_AZ, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv2df2_vec_pack_sfix, "__builtin_ia32_roundpd_az_vec_pack_sfix", IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_floorps", IX86_BUILTIN_FLOORPS, (enum rtx_code) ROUND_FLOOR, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_ceilps", IX86_BUILTIN_CEILPS, (enum rtx_code) ROUND_CEIL, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_truncps", IX86_BUILTIN_TRUNCPS, (enum rtx_code) ROUND_TRUNC, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_rintps", IX86_BUILTIN_RINTPS, (enum rtx_code) ROUND_MXCSR, (int) V4SF_FTYPE_V4SF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps_sfix, "__builtin_ia32_floorps_sfix", IX86_BUILTIN_FLOORPS_SFIX, (enum rtx_code) ROUND_FLOOR, (int) V4SI_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps_sfix, "__builtin_ia32_ceilps_sfix", IX86_BUILTIN_CEILPS_SFIX, (enum rtx_code) ROUND_CEIL, (int) V4SI_FTYPE_V4SF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv4sf2, "__builtin_ia32_roundps_az", IX86_BUILTIN_ROUNDPS_AZ, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv4sf2_sfix, "__builtin_ia32_roundps_az_sfix", IX86_BUILTIN_ROUNDPS_AZ_SFIX, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
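
  /* SSE4.2 */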
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
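
  /* SSE4A */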
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
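
  /* AES */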
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
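
  /* PCLMUL */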
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
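
  /* AVX */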
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_floatv4siv4df2, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_floatv8siv8sf2, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_fix_truncv4dfv4si2, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_fix_truncv8sfv8si2, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_floorpd256", IX86_BUILTIN_FLOORPD256, (enum rtx_code) ROUND_FLOOR, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_ceilpd256", IX86_BUILTIN_CEILPD256, (enum rtx_code) ROUND_CEIL, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_truncpd256", IX86_BUILTIN_TRUNCPD256, (enum rtx_code) ROUND_TRUNC, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_rintpd256", IX86_BUILTIN_RINTPD256, (enum rtx_code) ROUND_MXCSR, (int) V4DF_FTYPE_V4DF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv4df2, "__builtin_ia32_roundpd_az256", IX86_BUILTIN_ROUNDPD_AZ256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv4df2_vec_pack_sfix, "__builtin_ia32_roundpd_az_vec_pack_sfix256", IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256, UNKNOWN, (int) V8SI_FTYPE_V4DF_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd_vec_pack_sfix256, "__builtin_ia32_floorpd_vec_pack_sfix256", IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256, (enum rtx_code) ROUND_FLOOR, (int) V8SI_FTYPE_V4DF_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd_vec_pack_sfix256, "__builtin_ia32_ceilpd_vec_pack_sfix256", IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256, (enum rtx_code) ROUND_CEIL, (int) V8SI_FTYPE_V4DF_V4DF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_floorps256", IX86_BUILTIN_FLOORPS256, (enum rtx_code) ROUND_FLOOR, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_ceilps256", IX86_BUILTIN_CEILPS256, (enum rtx_code) ROUND_CEIL, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_truncps256", IX86_BUILTIN_TRUNCPS256, (enum rtx_code) ROUND_TRUNC, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_rintps256", IX86_BUILTIN_RINTPS256, (enum rtx_code) ROUND_MXCSR, (int) V8SF_FTYPE_V8SF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps_sfix256, "__builtin_ia32_floorps_sfix256", IX86_BUILTIN_FLOORPS_SFIX256, (enum rtx_code) ROUND_FLOOR, (int) V8SI_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps_sfix256, "__builtin_ia32_ceilps_sfix256", IX86_BUILTIN_CEILPS_SFIX256, (enum rtx_code) ROUND_CEIL, (int) V8SI_FTYPE_V8SF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv8sf2, "__builtin_ia32_roundps_az256", IX86_BUILTIN_ROUNDPS_AZ256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv8sf2_sfix, "__builtin_ia32_roundps_az_sfix256", IX86_BUILTIN_ROUNDPS_AZ_SFIX256, UNKNOWN, (int) V8SI_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_pack_sfix_v4df, "__builtin_ia32_vec_pack_sfix256 ", IX86_BUILTIN_VEC_PACK_SFIX256, UNKNOWN, (int) V8SI_FTYPE_V4DF_V4DF },
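
  /* AVX2 */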
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_mpsadbw, "__builtin_ia32_mpsadbw256", IX86_BUILTIN_MPSADBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv32qi2, "__builtin_ia32_pabsb256", IX86_BUILTIN_PABSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv16hi2, "__builtin_ia32_pabsw256", IX86_BUILTIN_PABSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv8si2, "__builtin_ia32_pabsd256", IX86_BUILTIN_PABSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packssdw, "__builtin_ia32_packssdw256", IX86_BUILTIN_PACKSSDW256, UNKNOWN, (int) V16HI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packsswb, "__builtin_ia32_packsswb256", IX86_BUILTIN_PACKSSWB256, UNKNOWN, (int) V32QI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packusdw, "__builtin_ia32_packusdw256", IX86_BUILTIN_PACKUSDW256, UNKNOWN, (int) V16HI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packuswb, "__builtin_ia32_packuswb256", IX86_BUILTIN_PACKUSWB256, UNKNOWN, (int) V32QI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv32qi3, "__builtin_ia32_paddb256", IX86_BUILTIN_PADDB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv16hi3, "__builtin_ia32_paddw256", IX86_BUILTIN_PADDW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv8si3, "__builtin_ia32_paddd256", IX86_BUILTIN_PADDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv4di3, "__builtin_ia32_paddq256", IX86_BUILTIN_PADDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ssaddv32qi3, "__builtin_ia32_paddsb256", IX86_BUILTIN_PADDSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ssaddv16hi3, "__builtin_ia32_paddsw256", IX86_BUILTIN_PADDSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_usaddv32qi3, "__builtin_ia32_paddusb256", IX86_BUILTIN_PADDUSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_usaddv16hi3, "__builtin_ia32_paddusw256", IX86_BUILTIN_PADDUSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_palignrv2ti, "__builtin_ia32_palignr256", IX86_BUILTIN_PALIGNR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_andv4di3, "__builtin_ia32_andsi256", IX86_BUILTIN_AND256I, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_andnotv4di3, "__builtin_ia32_andnotsi256", IX86_BUILTIN_ANDNOT256I, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_uavgv32qi3, "__builtin_ia32_pavgb256", IX86_BUILTIN_PAVGB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_uavgv16hi3, "__builtin_ia32_pavgw256", IX86_BUILTIN_PAVGW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblendvb, "__builtin_ia32_pblendvb256", IX86_BUILTIN_PBLENDVB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblendw, "__builtin_ia32_pblendw256", IX86_BUILTIN_PBLENDVW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv32qi3, "__builtin_ia32_pcmpeqb256", IX86_BUILTIN_PCMPEQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv16hi3, "__builtin_ia32_pcmpeqw256", IX86_BUILTIN_PCMPEQW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv8si3, "__builtin_ia32_pcmpeqd256", IX86_BUILTIN_PCMPEQD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv4di3, "__builtin_ia32_pcmpeqq256", IX86_BUILTIN_PCMPEQQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv32qi3, "__builtin_ia32_pcmpgtb256", IX86_BUILTIN_PCMPGTB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv16hi3, "__builtin_ia32_pcmpgtw256", IX86_BUILTIN_PCMPGTW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv8si3, "__builtin_ia32_pcmpgtd256", IX86_BUILTIN_PCMPGTD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv4di3, "__builtin_ia32_pcmpgtq256", IX86_BUILTIN_PCMPGTQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phaddwv16hi3, "__builtin_ia32_phaddw256", IX86_BUILTIN_PHADDW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phadddv8si3, "__builtin_ia32_phaddd256", IX86_BUILTIN_PHADDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phaddswv16hi3, "__builtin_ia32_phaddsw256", IX86_BUILTIN_PHADDSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubwv16hi3, "__builtin_ia32_phsubw256", IX86_BUILTIN_PHSUBW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubdv8si3, "__builtin_ia32_phsubd256", IX86_BUILTIN_PHSUBD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubswv16hi3, "__builtin_ia32_phsubsw256", IX86_BUILTIN_PHSUBSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmaddubsw256, "__builtin_ia32_pmaddubsw256", IX86_BUILTIN_PMADDUBSW256, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmaddwd, "__builtin_ia32_pmaddwd256", IX86_BUILTIN_PMADDWD256, UNKNOWN, (int) V8SI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv32qi3, "__builtin_ia32_pmaxsb256", IX86_BUILTIN_PMAXSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv16hi3, "__builtin_ia32_pmaxsw256", IX86_BUILTIN_PMAXSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv8si3, "__builtin_ia32_pmaxsd256", IX86_BUILTIN_PMAXSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv32qi3, "__builtin_ia32_pmaxub256", IX86_BUILTIN_PMAXUB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv16hi3, "__builtin_ia32_pmaxuw256", IX86_BUILTIN_PMAXUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv8si3, "__builtin_ia32_pmaxud256", IX86_BUILTIN_PMAXUD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv32qi3, "__builtin_ia32_pminsb256", IX86_BUILTIN_PMINSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv16hi3, "__builtin_ia32_pminsw256", IX86_BUILTIN_PMINSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv8si3, "__builtin_ia32_pminsd256", IX86_BUILTIN_PMINSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv32qi3, "__builtin_ia32_pminub256", IX86_BUILTIN_PMINUB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv16hi3, "__builtin_ia32_pminuw256", IX86_BUILTIN_PMINUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv8si3, "__builtin_ia32_pminud256", IX86_BUILTIN_PMINUD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmovmskb, "__builtin_ia32_pmovmskb256", IX86_BUILTIN_PMOVMSKB256, UNKNOWN, (int) INT_FTYPE_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv16qiv16hi2, "__builtin_ia32_pmovsxbw256", IX86_BUILTIN_PMOVSXBW256, UNKNOWN, (int) V16HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv8qiv8si2, "__builtin_ia32_pmovsxbd256", IX86_BUILTIN_PMOVSXBD256, UNKNOWN, (int) V8SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4qiv4di2, "__builtin_ia32_pmovsxbq256", IX86_BUILTIN_PMOVSXBQ256, UNKNOWN, (int) V4DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv8hiv8si2, "__builtin_ia32_pmovsxwd256", IX86_BUILTIN_PMOVSXWD256, UNKNOWN, (int) V8SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4hiv4di2, "__builtin_ia32_pmovsxwq256", IX86_BUILTIN_PMOVSXWQ256, UNKNOWN, (int) V4DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4siv4di2, "__builtin_ia32_pmovsxdq256", IX86_BUILTIN_PMOVSXDQ256, UNKNOWN, (int) V4DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv16qiv16hi2, "__builtin_ia32_pmovzxbw256", IX86_BUILTIN_PMOVZXBW256, UNKNOWN, (int) V16HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv8qiv8si2, "__builtin_ia32_pmovzxbd256", IX86_BUILTIN_PMOVZXBD256, UNKNOWN, (int) V8SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4qiv4di2, "__builtin_ia32_pmovzxbq256", IX86_BUILTIN_PMOVZXBQ256, UNKNOWN, (int) V4DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv8hiv8si2, "__builtin_ia32_pmovzxwd256", IX86_BUILTIN_PMOVZXWD256, UNKNOWN, (int) V8SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4hiv4di2, "__builtin_ia32_pmovzxwq256", IX86_BUILTIN_PMOVZXWQ256, UNKNOWN, (int) V4DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4siv4di2, "__builtin_ia32_pmovzxdq256", IX86_BUILTIN_PMOVZXDQ256, UNKNOWN, (int) V4DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_vec_widen_smult_even_v8si, "__builtin_ia32_pmuldq256", IX86_BUILTIN_PMULDQ256, UNKNOWN, (int) V4DI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmulhrswv16hi3, "__builtin_ia32_pmulhrsw256", IX86_BUILTIN_PMULHRSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umulv16hi3_highpart, "__builtin_ia32_pmulhuw256", IX86_BUILTIN_PMULHUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smulv16hi3_highpart, "__builtin_ia32_pmulhw256", IX86_BUILTIN_PMULHW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_mulv16hi3, "__builtin_ia32_pmullw256", IX86_BUILTIN_PMULLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_mulv8si3, "__builtin_ia32_pmulld256", IX86_BUILTIN_PMULLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_vec_widen_umult_even_v8si, "__builtin_ia32_pmuludq256", IX86_BUILTIN_PMULUDQ256, UNKNOWN, (int) V4DI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_iorv4di3, "__builtin_ia32_por256", IX86_BUILTIN_POR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psadbw, "__builtin_ia32_psadbw256", IX86_BUILTIN_PSADBW256, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufbv32qi3, "__builtin_ia32_pshufb256", IX86_BUILTIN_PSHUFB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufdv3, "__builtin_ia32_pshufd256", IX86_BUILTIN_PSHUFD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufhwv3, "__builtin_ia32_pshufhw256", IX86_BUILTIN_PSHUFHW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshuflwv3, "__builtin_ia32_pshuflw256", IX86_BUILTIN_PSHUFLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv32qi3, "__builtin_ia32_psignb256", IX86_BUILTIN_PSIGNB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv16hi3, "__builtin_ia32_psignw256", IX86_BUILTIN_PSIGNW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv8si3, "__builtin_ia32_psignd256", IX86_BUILTIN_PSIGND256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlv2ti3, "__builtin_ia32_pslldqi256", IX86_BUILTIN_PSLLDQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv16hi3, "__builtin_ia32_psllwi256", IX86_BUILTIN_PSLLWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv16hi3, "__builtin_ia32_psllw256", IX86_BUILTIN_PSLLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv8si3, "__builtin_ia32_pslldi256", IX86_BUILTIN_PSLLDI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv8si3, "__builtin_ia32_pslld256", IX86_BUILTIN_PSLLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv4di3, "__builtin_ia32_psllqi256", IX86_BUILTIN_PSLLQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv4di3, "__builtin_ia32_psllq256", IX86_BUILTIN_PSLLQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv16hi3, "__builtin_ia32_psrawi256", IX86_BUILTIN_PSRAWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv16hi3, "__builtin_ia32_psraw256", IX86_BUILTIN_PSRAW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv8si3, "__builtin_ia32_psradi256", IX86_BUILTIN_PSRADI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv8si3, "__builtin_ia32_psrad256", IX86_BUILTIN_PSRAD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrv2ti3, "__builtin_ia32_psrldqi256", IX86_BUILTIN_PSRLDQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv16hi3, "__builtin_ia32_psrlwi256", IX86_BUILTIN_PSRLWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv16hi3, "__builtin_ia32_psrlw256", IX86_BUILTIN_PSRLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv8si3, "__builtin_ia32_psrldi256", IX86_BUILTIN_PSRLDI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv8si3, "__builtin_ia32_psrld256", IX86_BUILTIN_PSRLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv4di3, "__builtin_ia32_psrlqi256", IX86_BUILTIN_PSRLQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv4di3, "__builtin_ia32_psrlq256", IX86_BUILTIN_PSRLQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv32qi3, "__builtin_ia32_psubb256", IX86_BUILTIN_PSUBB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv16hi3, "__builtin_ia32_psubw256", IX86_BUILTIN_PSUBW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv8si3, "__builtin_ia32_psubd256", IX86_BUILTIN_PSUBD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv4di3, "__builtin_ia32_psubq256", IX86_BUILTIN_PSUBQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sssubv32qi3, "__builtin_ia32_psubsb256", IX86_BUILTIN_PSUBSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sssubv16hi3, "__builtin_ia32_psubsw256", IX86_BUILTIN_PSUBSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ussubv32qi3, "__builtin_ia32_psubusb256", IX86_BUILTIN_PSUBUSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ussubv16hi3, "__builtin_ia32_psubusw256", IX86_BUILTIN_PSUBUSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv32qi, "__builtin_ia32_punpckhbw256", IX86_BUILTIN_PUNPCKHBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv16hi, "__builtin_ia32_punpckhwd256", IX86_BUILTIN_PUNPCKHWD256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv8si, "__builtin_ia32_punpckhdq256", IX86_BUILTIN_PUNPCKHDQ256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv4di, "__builtin_ia32_punpckhqdq256", IX86_BUILTIN_PUNPCKHQDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv32qi, "__builtin_ia32_punpcklbw256", IX86_BUILTIN_PUNPCKLBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv16hi, "__builtin_ia32_punpcklwd256", IX86_BUILTIN_PUNPCKLWD256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv8si, "__builtin_ia32_punpckldq256", IX86_BUILTIN_PUNPCKLDQ256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv4di, "__builtin_ia32_punpcklqdq256", IX86_BUILTIN_PUNPCKLQDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_xorv4di3, "__builtin_ia32_pxor256", IX86_BUILTIN_PXOR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv4sf, "__builtin_ia32_vbroadcastss_ps", IX86_BUILTIN_VBROADCASTSS_PS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv8sf, "__builtin_ia32_vbroadcastss_ps256", IX86_BUILTIN_VBROADCASTSS_PS256, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv4df, "__builtin_ia32_vbroadcastsd_pd256", IX86_BUILTIN_VBROADCASTSD_PD256, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vbroadcasti128_v4di, "__builtin_ia32_vbroadcastsi256", IX86_BUILTIN_VBROADCASTSI256, UNKNOWN, (int) V4DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblenddv4si, "__builtin_ia32_pblendd128", IX86_BUILTIN_PBLENDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblenddv8si, "__builtin_ia32_pblendd256", IX86_BUILTIN_PBLENDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv32qi, "__builtin_ia32_pbroadcastb256", IX86_BUILTIN_PBROADCASTB256, UNKNOWN, (int) V32QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv16hi, "__builtin_ia32_pbroadcastw256", IX86_BUILTIN_PBROADCASTW256, UNKNOWN, (int) V16HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv8si, "__builtin_ia32_pbroadcastd256", IX86_BUILTIN_PBROADCASTD256, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv4di, "__builtin_ia32_pbroadcastq256", IX86_BUILTIN_PBROADCASTQ256, UNKNOWN, (int) V4DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv16qi, "__builtin_ia32_pbroadcastb128", IX86_BUILTIN_PBROADCASTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv8hi, "__builtin_ia32_pbroadcastw128", IX86_BUILTIN_PBROADCASTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv4si, "__builtin_ia32_pbroadcastd128", IX86_BUILTIN_PBROADCASTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv2di, "__builtin_ia32_pbroadcastq128", IX86_BUILTIN_PBROADCASTQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permvarv8si, "__builtin_ia32_permvarsi256", IX86_BUILTIN_VPERMVARSI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permvarv8sf, "__builtin_ia32_permvarsf256", IX86_BUILTIN_VPERMVARSF256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv4df, "__builtin_ia32_permdf256", IX86_BUILTIN_VPERMDF256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv4di, "__builtin_ia32_permdi256", IX86_BUILTIN_VPERMDI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv2ti, "__builtin_ia32_permti256", IX86_BUILTIN_VPERMTI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_extracti128, "__builtin_ia32_extract128i256", IX86_BUILTIN_VEXTRACT128I256, UNKNOWN, (int) V2DI_FTYPE_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_inserti128, "__builtin_ia32_insert128i256", IX86_BUILTIN_VINSERT128I256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4di, "__builtin_ia32_psllv4di", IX86_BUILTIN_PSLLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv2di, "__builtin_ia32_psllv2di", IX86_BUILTIN_PSLLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv8si, "__builtin_ia32_psllv8si", IX86_BUILTIN_PSLLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4si, "__builtin_ia32_psllv4si", IX86_BUILTIN_PSLLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv8si, "__builtin_ia32_psrav8si", IX86_BUILTIN_PSRAVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv4si, "__builtin_ia32_psrav4si", IX86_BUILTIN_PSRAVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4di, "__builtin_ia32_psrlv4di", IX86_BUILTIN_PSRLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv2di, "__builtin_ia32_psrlv2di", IX86_BUILTIN_PSRLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv8si, "__builtin_ia32_psrlv8si", IX86_BUILTIN_PSRLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4si, "__builtin_ia32_psrlv4si", IX86_BUILTIN_PSRLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
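
  /* LZCNT */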
  { OPTION_MASK_ISA_LZCNT, CODE_FOR_clzhi2_lzcnt, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
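
  /* BMI */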
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
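
  /* TBM */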
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
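
  /* F16C */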
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
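
  /* BMI2 */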
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_bzhi_si3, "__builtin_ia32_bzhi_si", IX86_BUILTIN_BZHI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_bzhi_di3, "__builtin_ia32_bzhi_di", IX86_BUILTIN_BZHI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pdep_si3, "__builtin_ia32_pdep_si", IX86_BUILTIN_PDEP32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pdep_di3, "__builtin_ia32_pdep_di", IX86_BUILTIN_PDEP64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pext_si3, "__builtin_ia32_pext_si", IX86_BUILTIN_PEXT32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pext_di3, "__builtin_ia32_pext_di", IX86_BUILTIN_PEXT64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
};

/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
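
/* Each entry in the table below gives the ISA mask that guards the
   builtin, the insn code implementing it, the builtin's name and enum
   code, an optional rtx comparison code (UNKNOWN when unused), and the
   MULTI_ARG_* function-type signature defined above.  */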
static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA, CODE_FOR_fmai_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss3", IX86_BUILTIN_VFMADDSS3,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA, CODE_FOR_fmai_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd3", IX86_BUILTIN_VFMADDSD3,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
    "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
    "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
    "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
    "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
    "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
    "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
    "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
    "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
27976 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv4si3
, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED
, GE
, (int)MULTI_ARG_2_SI_CMP
},
27978 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ
, EQ
, (int)MULTI_ARG_2_DI_CMP
},
27979 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
27980 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
27981 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ
, LT
, (int)MULTI_ARG_2_DI_CMP
},
27982 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ
, LE
, (int)MULTI_ARG_2_DI_CMP
},
27983 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ
, GT
, (int)MULTI_ARG_2_DI_CMP
},
27984 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmpv2di3
, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ
, GE
, (int)MULTI_ARG_2_DI_CMP
},
27986 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v16qi3
,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB
, EQ
, (int)MULTI_ARG_2_QI_CMP
},
27987 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v16qi3
,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB
, NE
, (int)MULTI_ARG_2_QI_CMP
},
27988 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v16qi3
,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB
, NE
, (int)MULTI_ARG_2_QI_CMP
},
27989 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB
, LTU
, (int)MULTI_ARG_2_QI_CMP
},
27990 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB
, LEU
, (int)MULTI_ARG_2_QI_CMP
},
27991 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB
, GTU
, (int)MULTI_ARG_2_QI_CMP
},
27992 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv16qi3
, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB
, GEU
, (int)MULTI_ARG_2_QI_CMP
},
27994 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v8hi3
, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW
, EQ
, (int)MULTI_ARG_2_HI_CMP
},
27995 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v8hi3
, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW
, NE
, (int)MULTI_ARG_2_HI_CMP
},
27996 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v8hi3
, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW
, NE
, (int)MULTI_ARG_2_HI_CMP
},
27997 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW
, LTU
, (int)MULTI_ARG_2_HI_CMP
},
27998 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW
, LEU
, (int)MULTI_ARG_2_HI_CMP
},
27999 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW
, GTU
, (int)MULTI_ARG_2_HI_CMP
},
28000 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv8hi3
, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW
, GEU
, (int)MULTI_ARG_2_HI_CMP
},
28002 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v4si3
, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD
, EQ
, (int)MULTI_ARG_2_SI_CMP
},
28003 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v4si3
, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD
, NE
, (int)MULTI_ARG_2_SI_CMP
},
28004 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v4si3
, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD
, NE
, (int)MULTI_ARG_2_SI_CMP
},
28005 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD
, LTU
, (int)MULTI_ARG_2_SI_CMP
},
28006 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD
, LEU
, (int)MULTI_ARG_2_SI_CMP
},
28007 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD
, GTU
, (int)MULTI_ARG_2_SI_CMP
},
28008 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv4si3
, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD
, GEU
, (int)MULTI_ARG_2_SI_CMP
},
28010 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v2di3
, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ
, EQ
, (int)MULTI_ARG_2_DI_CMP
},
28011 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v2di3
, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
28012 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_uns2v2di3
, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ
, NE
, (int)MULTI_ARG_2_DI_CMP
},
28013 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ
, LTU
, (int)MULTI_ARG_2_DI_CMP
},
28014 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ
, LEU
, (int)MULTI_ARG_2_DI_CMP
},
28015 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ
, GTU
, (int)MULTI_ARG_2_DI_CMP
},
28016 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_maskcmp_unsv2di3
, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ
, GEU
, (int)MULTI_ARG_2_DI_CMP
},
28018 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_QI_TF
},
28019 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_HI_TF
},
28020 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_SI_TF
},
28021 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ
, (enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_DI_TF
},
28022 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_QI_TF
},
28023 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_HI_TF
},
28024 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_SI_TF
},
28025 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ
,(enum rtx_code
) PCOM_FALSE
, (int)MULTI_ARG_2_DI_TF
},
28027 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_QI_TF
},
28028 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_HI_TF
},
28029 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_SI_TF
},
28030 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_DI_TF
},
28031 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv16qi3
, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_QI_TF
},
28032 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv8hi3
, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_HI_TF
},
28033 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv4si3
, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_SI_TF
},
28034 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_pcom_tfv2di3
, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ
, (enum rtx_code
) PCOM_TRUE
, (int)MULTI_ARG_2_DI_TF
},
28036 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v2df3
, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD
, UNKNOWN
, (int)MULTI_ARG_4_DF2_DI_I
},
28037 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v4sf3
, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS
, UNKNOWN
, (int)MULTI_ARG_4_SF2_SI_I
},
28038 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v4df3
, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256
, UNKNOWN
, (int)MULTI_ARG_4_DF2_DI_I1
},
28039 { OPTION_MASK_ISA_XOP
, CODE_FOR_xop_vpermil2v8sf3
, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256
, UNKNOWN
, (int)MULTI_ARG_4_SF2_SI_I1
},
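
/* A minimal sketch of how the rows above are consumed (see the
   bdesc_multi_arg loop in ix86_init_mmx_sse_builtins below): roughly

     ftype = (enum ix86_builtin_func_type) d->flag;
     def_builtin_const (d->mask, d->name, ftype, d->code);

   so, for instance, the VPSHAQ row registers "__builtin_ia32_vpshaq" as a
   const builtin that is only usable when OPTION_MASK_ISA_XOP is enabled.  */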
/* TM vector builtins.  */

/* Reuse the existing x86-specific `struct builtin_description' because
   we're lazy.  Add casts to make them fit.  */
static const struct builtin_description bdesc_tm[] =
{
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WM64", (enum ix86_builtins) BUILT_IN_TM_STORE_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaRM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaWM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaRM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RfWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WM128", (enum ix86_builtins) BUILT_IN_TM_STORE_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaRM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaWM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaRM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RfWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WM256", (enum ix86_builtins) BUILT_IN_TM_STORE_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaRM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaWM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaRM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RfWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },

  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_LM64", (enum ix86_builtins) BUILT_IN_TM_LOG_M64, UNKNOWN, VOID_FTYPE_PCVOID },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_LM128", (enum ix86_builtins) BUILT_IN_TM_LOG_M128, UNKNOWN, VOID_FTYPE_PCVOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_LM256", (enum ix86_builtins) BUILT_IN_TM_LOG_M256, UNKNOWN, VOID_FTYPE_PCVOID },
};
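
/* Roughly how a bdesc_tm row is materialized (see ix86_init_tm_builtins
   below): each name is registered both with and without the "__builtin_"
   prefix, so the first MMX row above becomes the libitm entry point
   _ITM_WM64, a store barrier taking a V2SI pointer and a V2SI value per
   VOID_FTYPE_PV2SI_V2SI.  The prefix-stripped name is a sketch of the
   visible symbol; the attribute plumbing lives in the initializer.  */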
/* TM callbacks.  */

/* Return the builtin decl needed to load a vector of TYPE.  */

static tree
ix86_builtin_tm_load (tree type)
{
  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      switch (tree_low_cst (TYPE_SIZE (type), 1))
	{
	case 64:
	  return builtin_decl_explicit (BUILT_IN_TM_LOAD_M64);
	case 128:
	  return builtin_decl_explicit (BUILT_IN_TM_LOAD_M128);
	case 256:
	  return builtin_decl_explicit (BUILT_IN_TM_LOAD_M256);
	}
    }
  return NULL_TREE;
}

/* Return the builtin decl needed to store a vector of TYPE.  */

static tree
ix86_builtin_tm_store (tree type)
{
  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      switch (tree_low_cst (TYPE_SIZE (type), 1))
	{
	case 64:
	  return builtin_decl_explicit (BUILT_IN_TM_STORE_M64);
	case 128:
	  return builtin_decl_explicit (BUILT_IN_TM_STORE_M128);
	case 256:
	  return builtin_decl_explicit (BUILT_IN_TM_STORE_M256);
	}
    }
  return NULL_TREE;
}
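
/* Illustrative use of the two hooks above (a sketch, not code from this
   file): TM instrumentation can ask for a whole-vector barrier with

     tree fndecl = ix86_builtin_tm_load (vectype);

   getting e.g. BUILT_IN_TM_LOAD_M128 for a 128-bit vector, and falling
   back to element-wise barriers when NULL_TREE comes back, i.e. for any
   vector size other than 64, 128 or 256 bits.  */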
/* Initialize the transactional memory vector load/store builtins.  */

static void
ix86_init_tm_builtins (void)
{
  enum ix86_builtin_func_type ftype;
  const struct builtin_description *d;
  size_t i;
  tree decl;
  tree attrs_load, attrs_type_load, attrs_store, attrs_type_store;
  tree attrs_log, attrs_type_log;

  if (!flag_tm)
    return;

  /* If there are no builtins defined, we must be compiling in a
     language without trans-mem support.  */
  if (!builtin_decl_explicit_p (BUILT_IN_TM_LOAD_1))
    return;

  /* Use whatever attributes a normal TM load has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_LOAD_1);
  attrs_load = DECL_ATTRIBUTES (decl);
  attrs_type_load = TYPE_ATTRIBUTES (TREE_TYPE (decl));
  /* Use whatever attributes a normal TM store has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_STORE_1);
  attrs_store = DECL_ATTRIBUTES (decl);
  attrs_type_store = TYPE_ATTRIBUTES (TREE_TYPE (decl));
  /* Use whatever attributes a normal TM log has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_LOG);
  attrs_log = DECL_ATTRIBUTES (decl);
  attrs_type_log = TYPE_ATTRIBUTES (TREE_TYPE (decl));

  for (i = 0, d = bdesc_tm;
       i < ARRAY_SIZE (bdesc_tm);
       i++, d++)
    {
      if ((d->mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type, attrs, attrs_type;
	  enum built_in_function code = (enum built_in_function) d->code;

	  ftype = (enum ix86_builtin_func_type) d->flag;
	  type = ix86_get_builtin_func_type (ftype);

	  if (BUILTIN_TM_LOAD_P (code))
	    {
	      attrs = attrs_load;
	      attrs_type = attrs_type_load;
	    }
	  else if (BUILTIN_TM_STORE_P (code))
	    {
	      attrs = attrs_store;
	      attrs_type = attrs_type_store;
	    }
	  else
	    {
	      attrs = attrs_log;
	      attrs_type = attrs_type_log;
	    }
	  decl = add_builtin_function (d->name, type, code, BUILT_IN_NORMAL,
				       /* The builtin without the prefix for
					  calling it directly.  */
				       d->name + strlen ("__builtin_"),
				       attrs);
	  /* add_builtin_function () will set the DECL_ATTRIBUTES, now
	     set the TYPE_ATTRIBUTES.  */
	  decl_attributes (&TREE_TYPE (decl), attrs_type, ATTR_FLAG_BUILT_IN);

	  set_builtin_decl (code, decl, false);
	}
    }
}
/* Set up all the MMX/SSE builtins, even builtins for instructions that are
   not in the current target ISA, to allow the user to compile particular
   modules with different target specific options that differ from the
   command line options.  */

static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description *d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
	ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
	ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
	ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
	ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
	ftype = INT_FTYPE_V2DF_V2DF;
      else
	ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
	       UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
	       "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
	       IX86_BUILTIN_MASKMOVQ);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
	       VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
	       VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
			    VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
	       VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
	       VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
		     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
		     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
		     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
	       INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
	       INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
	       "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
	       IX86_BUILTIN_RDRAND64_STEP);

  /* AVX2 gather builtins.  */
  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv2df",
	       V2DF_FTYPE_V2DF_PCDOUBLE_V4SI_V2DF_INT,
	       IX86_BUILTIN_GATHERSIV2DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4df",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V4SI_V4DF_INT,
	       IX86_BUILTIN_GATHERSIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv2df",
	       V2DF_FTYPE_V2DF_PCDOUBLE_V2DI_V2DF_INT,
	       IX86_BUILTIN_GATHERDIV2DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4df",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V4DI_V4DF_INT,
	       IX86_BUILTIN_GATHERDIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4sf",
	       V4SF_FTYPE_V4SF_PCFLOAT_V4SI_V4SF_INT,
	       IX86_BUILTIN_GATHERSIV4SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv8sf",
	       V8SF_FTYPE_V8SF_PCFLOAT_V8SI_V8SF_INT,
	       IX86_BUILTIN_GATHERSIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4sf",
	       V4SF_FTYPE_V4SF_PCFLOAT_V2DI_V4SF_INT,
	       IX86_BUILTIN_GATHERDIV4SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4sf256",
	       V4SF_FTYPE_V4SF_PCFLOAT_V4DI_V4SF_INT,
	       IX86_BUILTIN_GATHERDIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv2di",
	       V2DI_FTYPE_V2DI_PCINT64_V4SI_V2DI_INT,
	       IX86_BUILTIN_GATHERSIV2DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4di",
	       V4DI_FTYPE_V4DI_PCINT64_V4SI_V4DI_INT,
	       IX86_BUILTIN_GATHERSIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv2di",
	       V2DI_FTYPE_V2DI_PCINT64_V2DI_V2DI_INT,
	       IX86_BUILTIN_GATHERDIV2DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4di",
	       V4DI_FTYPE_V4DI_PCINT64_V4DI_V4DI_INT,
	       IX86_BUILTIN_GATHERDIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4si",
	       V4SI_FTYPE_V4SI_PCINT_V4SI_V4SI_INT,
	       IX86_BUILTIN_GATHERSIV4SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv8si",
	       V8SI_FTYPE_V8SI_PCINT_V8SI_V8SI_INT,
	       IX86_BUILTIN_GATHERSIV8SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4si",
	       V4SI_FTYPE_V4SI_PCINT_V2DI_V4SI_INT,
	       IX86_BUILTIN_GATHERDIV4SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4si256",
	       V4SI_FTYPE_V4SI_PCINT_V4DI_V4SI_INT,
	       IX86_BUILTIN_GATHERDIV8SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltsiv4df ",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V8SI_V4DF_INT,
	       IX86_BUILTIN_GATHERALTSIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltdiv4sf256 ",
	       V8SF_FTYPE_V8SF_PCFLOAT_V4DI_V8SF_INT,
	       IX86_BUILTIN_GATHERALTDIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltsiv4di ",
	       V4DI_FTYPE_V4DI_PCINT64_V8SI_V4DI_INT,
	       IX86_BUILTIN_GATHERALTSIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltdiv4si256 ",
	       V8SI_FTYPE_V8SI_PCINT_V4DI_V8SI_INT,
	       IX86_BUILTIN_GATHERALTDIV8SI);

  /* RTM.  */
  def_builtin (OPTION_MASK_ISA_RTM, "__builtin_ia32_xabort",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_XABORT);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
		     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
		     V4HI_FTYPE_HI_HI_HI_HI,
		     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
		     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
		     IX86_BUILTIN_VEC_INIT_V8QI);

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
		     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
		     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
		     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
		     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
		     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_ext_v4hi",
		     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
		     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
		     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
		     "__builtin_ia32_vec_set_v2di",
		     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
		     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
		     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
		     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_set_v4hi",
		     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
		     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);

  def_builtin (OPTION_MASK_ISA_RDSEED, "__builtin_ia32_rdseed_hi_step",
	       INT_FTYPE_PUSHORT, IX86_BUILTIN_RDSEED16_STEP);
  def_builtin (OPTION_MASK_ISA_RDSEED, "__builtin_ia32_rdseed_si_step",
	       INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDSEED32_STEP);
  def_builtin (OPTION_MASK_ISA_RDSEED | OPTION_MASK_ISA_64BIT,
	       "__builtin_ia32_rdseed_di_step",
	       INT_FTYPE_PULONGLONG, IX86_BUILTIN_RDSEED64_STEP);

  def_builtin (0, "__builtin_ia32_addcarryx_u32",
	       UCHAR_FTYPE_UCHAR_UINT_UINT_PUNSIGNED, IX86_BUILTIN_ADDCARRYX32);
  def_builtin (OPTION_MASK_ISA_64BIT,
	       "__builtin_ia32_addcarryx_u64",
	       UCHAR_FTYPE_UCHAR_ULONGLONG_ULONGLONG_PULONGLONG,
	       IX86_BUILTIN_ADDCARRYX64);

  /* Add FMA4 multi-arg argument instructions.  */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}
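
/* For orientation (an illustration, not from this file): the builtins
   registered above are what the intrinsic headers expand to.  Assuming
   the usual avx2intrin.h mapping, a user-level gather such as

     __m128 v = _mm_i32gather_ps (base, idx, 4);

   ends up calling the "__builtin_ia32_gathersiv4sf" builtin defined above
   with type V4SF_FTYPE_V4SF_PCFLOAT_V4SI_V4SF_INT.  */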
/* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL
   to return a pointer to VERSION_DECL if the outcome of the expression
   formed by PREDICATE_CHAIN is true.  This function will be called during
   version dispatch to decide which function version to execute.  It returns
   the basic block at the end, to which more conditions can be added.  */

static basic_block
add_condition_to_bb (tree function_decl, tree version_decl,
		     tree predicate_chain, basic_block new_bb)
{
  gimple return_stmt;
  tree convert_expr, result_var;
  gimple convert_stmt;
  gimple call_cond_stmt;
  gimple if_else_stmt;

  basic_block bb1, bb2, bb3;
  edge e12, e23;

  tree cond_var, and_expr_var = NULL_TREE;
  gimple_seq gseq;

  tree predicate_decl, predicate_arg;

  push_cfun (DECL_STRUCT_FUNCTION (function_decl));

  gcc_assert (new_bb != NULL);
  gseq = bb_seq (new_bb);

  convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
			 build_fold_addr_expr (version_decl));
  result_var = create_tmp_var (ptr_type_node, NULL);
  convert_stmt = gimple_build_assign (result_var, convert_expr);
  return_stmt = gimple_build_return (result_var);

  if (predicate_chain == NULL_TREE)
    {
      gimple_seq_add_stmt (&gseq, convert_stmt);
      gimple_seq_add_stmt (&gseq, return_stmt);
      set_bb_seq (new_bb, gseq);
      gimple_set_bb (convert_stmt, new_bb);
      gimple_set_bb (return_stmt, new_bb);
      pop_cfun ();
      return new_bb;
    }

  while (predicate_chain != NULL)
    {
      cond_var = create_tmp_var (integer_type_node, NULL);
      predicate_decl = TREE_PURPOSE (predicate_chain);
      predicate_arg = TREE_VALUE (predicate_chain);
      call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
      gimple_call_set_lhs (call_cond_stmt, cond_var);

      gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
      gimple_set_bb (call_cond_stmt, new_bb);
      gimple_seq_add_stmt (&gseq, call_cond_stmt);

      predicate_chain = TREE_CHAIN (predicate_chain);

      if (and_expr_var == NULL)
	and_expr_var = cond_var;
      else
	{
	  gimple assign_stmt;
	  /* Use MIN_EXPR to check if any integer is zero:
	     and_expr_var = min_expr <cond_var, and_expr_var>  */
	  assign_stmt = gimple_build_assign (and_expr_var,
					     build2 (MIN_EXPR, integer_type_node,
						     cond_var, and_expr_var));

	  gimple_set_block (assign_stmt, DECL_INITIAL (function_decl));
	  gimple_set_bb (assign_stmt, new_bb);
	  gimple_seq_add_stmt (&gseq, assign_stmt);
	}
    }

  if_else_stmt = gimple_build_cond (GT_EXPR, and_expr_var,
				    integer_zero_node,
				    NULL_TREE, NULL_TREE);
  gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (if_else_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, if_else_stmt);

  gimple_seq_add_stmt (&gseq, convert_stmt);
  gimple_seq_add_stmt (&gseq, return_stmt);
  set_bb_seq (new_bb, gseq);

  bb1 = new_bb;
  e12 = split_block (bb1, if_else_stmt);
  bb2 = e12->dest;
  e12->flags &= ~EDGE_FALLTHRU;
  e12->flags |= EDGE_TRUE_VALUE;

  e23 = split_block (bb2, return_stmt);

  gimple_set_bb (convert_stmt, bb2);
  gimple_set_bb (return_stmt, bb2);

  bb3 = e23->dest;
  make_edge (bb1, bb3, EDGE_FALSE_VALUE);

  remove_edge (e23);
  make_edge (bb2, EXIT_BLOCK_PTR, 0);

  pop_cfun ();

  return bb3;
}
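
/* The control flow produced by one call above is, roughly (a sketch for a
   single-element predicate chain):

     cond = predicate_decl (predicate_arg);   // e.g. __builtin_cpu_is
     if (cond > 0)
       return (void *) &version_decl;         // bb2, edge to EXIT
     // bb3: fall through; the next condition is appended here

   so repeated calls chain the checks in the order the caller supplies
   them.  */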
/* This parses the attribute arguments to target in DECL and determines
   the right builtin to use to match the platform specification.
   It returns the priority value for this version decl.  If PREDICATE_LIST
   is not NULL, it stores the list of cpu features that need to be checked
   before dispatching this function.  */

static unsigned int
get_builtin_code_for_version (tree decl, tree *predicate_list)
{
  tree attrs;
  struct cl_target_option cur_target;
  tree target_node;
  struct cl_target_option *new_target;
  const char *arg_str = NULL;
  const char *attrs_str = NULL;
  char *tok_str = NULL;
  char *token;

  /* Priority of i386 features, greater value is higher priority.  This is
     used to decide the order in which function dispatch must happen.  For
     instance, a version specialized for SSE4.2 should be checked for dispatch
     before a version for SSE3, as SSE4.2 implies SSE3.  */
  enum feature_priority
  {
    P_ZERO = 0,
    P_MMX,
    P_SSE,
    P_SSE2,
    P_SSE3,
    P_SSSE3,
    P_PROC_SSSE3,
    P_SSE4_a,
    P_PROC_SSE4_a,
    P_SSE4_1,
    P_SSE4_2,
    P_PROC_SSE4_2,
    P_POPCNT,
    P_AVX,
    P_AVX2,
    P_FMA,
    P_PROC_FMA
  };

  enum feature_priority priority = P_ZERO;

  /* These are the target attribute strings for which a dispatcher is
     available, from fold_builtin_cpu.  */

  static struct _feature_list
    {
      const char *const name;
      const enum feature_priority priority;
    }
  const feature_list[] =
    {
      {"mmx", P_MMX},
      {"sse", P_SSE},
      {"sse2", P_SSE2},
      {"sse3", P_SSE3},
      {"ssse3", P_SSSE3},
      {"sse4.1", P_SSE4_1},
      {"sse4.2", P_SSE4_2},
      {"popcnt", P_POPCNT},
      {"avx", P_AVX},
      {"avx2", P_AVX2}
    };

  static unsigned int NUM_FEATURES
    = sizeof (feature_list) / sizeof (struct _feature_list);

  unsigned int i;

  tree predicate_chain = NULL_TREE;
  tree predicate_decl, predicate_arg;

  attrs = lookup_attribute ("target", DECL_ATTRIBUTES (decl));
  gcc_assert (attrs != NULL);

  attrs = TREE_VALUE (TREE_VALUE (attrs));

  gcc_assert (TREE_CODE (attrs) == STRING_CST);
  attrs_str = TREE_STRING_POINTER (attrs);

  /* Handle arch= if specified.  For priority, set it to be 1 more than
     the best instruction set the processor can handle.  For instance, if
     there is a version for atom and a version for ssse3 (the highest ISA
     priority for atom), the atom version must be checked for dispatch
     before the ssse3 version.  */
  if (strstr (attrs_str, "arch=") != NULL)
    {
      cl_target_option_save (&cur_target, &global_options);
      target_node = ix86_valid_target_attribute_tree (attrs);

      gcc_assert (target_node);
      new_target = TREE_TARGET_OPTION (target_node);
      gcc_assert (new_target);

      if (new_target->arch_specified && new_target->arch > 0)
	{
	  switch (new_target->arch)
	    {
	    case PROCESSOR_CORE2:
	      arg_str = "core2";
	      priority = P_PROC_SSSE3;
	      break;
	    case PROCESSOR_COREI7:
	      arg_str = "corei7";
	      priority = P_PROC_SSE4_2;
	      break;
	    case PROCESSOR_ATOM:
	      arg_str = "atom";
	      priority = P_PROC_SSSE3;
	      break;
	    case PROCESSOR_AMDFAM10:
	      arg_str = "amdfam10h";
	      priority = P_PROC_SSE4_a;
	      break;
	    case PROCESSOR_BDVER1:
	      arg_str = "bdver1";
	      priority = P_PROC_FMA;
	      break;
	    case PROCESSOR_BDVER2:
	      arg_str = "bdver2";
	      priority = P_PROC_FMA;
	      break;
	    }
	}

      cl_target_option_restore (&global_options, &cur_target);

      if (predicate_list && arg_str == NULL)
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "No dispatcher found for the versioning attributes");
	  return 0;
	}

      if (predicate_list)
	{
	  predicate_decl = ix86_builtins [(int) IX86_BUILTIN_CPU_IS];
	  /* For a C string literal the length includes the trailing NULL.  */
	  predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
	  predicate_chain = tree_cons (predicate_decl, predicate_arg,
				       predicate_chain);
	}
    }

  /* Process feature name.  */
  tok_str = (char *) xmalloc (strlen (attrs_str) + 1);
  strcpy (tok_str, attrs_str);
  token = strtok (tok_str, ",");
  predicate_decl = ix86_builtins [(int) IX86_BUILTIN_CPU_SUPPORTS];

  while (token != NULL)
    {
      /* Do not process "arch=".  */
      if (strncmp (token, "arch=", 5) == 0)
	{
	  token = strtok (NULL, ",");
	  continue;
	}
      for (i = 0; i < NUM_FEATURES; ++i)
	{
	  if (strcmp (token, feature_list[i].name) == 0)
	    {
	      if (predicate_list)
		{
		  predicate_arg = build_string_literal (
				  strlen (feature_list[i].name) + 1,
				  feature_list[i].name);
		  predicate_chain = tree_cons (predicate_decl, predicate_arg,
					       predicate_chain);
		}
	      /* Find the maximum priority feature.  */
	      if (feature_list[i].priority > priority)
		priority = feature_list[i].priority;

	      break;
	    }
	}
      if (predicate_list && i == NUM_FEATURES)
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "No dispatcher found for %s", token);
	  return 0;
	}
      token = strtok (NULL, ",");
    }
  free (tok_str);

  if (predicate_list && predicate_chain == NULL_TREE)
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"No dispatcher found for the versioning attributes : %s",
		attrs_str);
      return 0;
    }
  else if (predicate_list)
    {
      predicate_chain = nreverse (predicate_chain);
      *predicate_list = predicate_chain;
    }

  return priority;
}
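
/* Worked example (illustrative): for a version declared with
   __attribute__ ((target ("arch=corei7"))) this returns P_PROC_SSE4_2 and,
   when PREDICATE_LIST is non-NULL, stores a one-element chain that calls
   __builtin_cpu_is ("corei7"); for target ("popcnt") it returns P_POPCNT
   with a chain calling __builtin_cpu_supports ("popcnt").  */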
/* This compares the priority of target features in function DECL1
   and DECL2.  It returns positive value if DECL1 is higher priority,
   negative value if DECL2 is higher priority and 0 if they are the
   same.  */

static int
ix86_compare_version_priority (tree decl1, tree decl2)
{
  unsigned int priority1 = 0;
  unsigned int priority2 = 0;

  if (lookup_attribute ("target", DECL_ATTRIBUTES (decl1)) != NULL)
    priority1 = get_builtin_code_for_version (decl1, NULL);

  if (lookup_attribute ("target", DECL_ATTRIBUTES (decl2)) != NULL)
    priority2 = get_builtin_code_for_version (decl2, NULL);

  return (int)priority1 - (int)priority2;
}
/* V1 and V2 point to function versions with different priorities
   based on the target ISA.  This function compares their priorities.  */

static int
feature_compare (const void *v1, const void *v2)
{
  typedef struct _function_version_info
    {
      tree version_decl;
      tree predicate_chain;
      unsigned int dispatch_priority;
    } function_version_info;

  const function_version_info c1 = *(const function_version_info *)v1;
  const function_version_info c2 = *(const function_version_info *)v2;
  return (c2.dispatch_priority - c1.dispatch_priority);
}
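
/* Note that returning c2 - c1 makes qsort produce a descending order, so
   the highest-priority (most specific) version ends up tested first by
   the dispatcher built below.  */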
/* This function generates the dispatch function for
   multi-versioned functions.  DISPATCH_DECL is the function which will
   contain the dispatch logic.  FNDECLS are the function choices for
   dispatch, and is a tree chain.  EMPTY_BB is the basic block pointer
   in DISPATCH_DECL in which the dispatch code is generated.  */

static int
dispatch_function_versions (tree dispatch_decl,
			    void *fndecls_p,
			    basic_block *empty_bb)
{
  tree default_decl;
  gimple ifunc_cpu_init_stmt;
  gimple_seq gseq;
  int ix;
  tree ele;
  vec<tree> *fndecls;
  unsigned int num_versions = 0;
  unsigned int actual_versions = 0;
  unsigned int i;

  struct _function_version_info
    {
      tree version_decl;
      tree predicate_chain;
      unsigned int dispatch_priority;
    } *function_version_info;

  gcc_assert (dispatch_decl != NULL
	      && fndecls_p != NULL
	      && empty_bb != NULL);

  /* fndecls_p is actually a vector.  */
  fndecls = static_cast<vec<tree> *> (fndecls_p);

  /* At least one more version other than the default.  */
  num_versions = fndecls->length ();
  gcc_assert (num_versions >= 2);

  function_version_info = (struct _function_version_info *)
    XNEWVEC (struct _function_version_info, (num_versions - 1));

  /* The first version in the vector is the default decl.  */
  default_decl = (*fndecls)[0];

  push_cfun (DECL_STRUCT_FUNCTION (dispatch_decl));

  gseq = bb_seq (*empty_bb);
  /* Function version dispatch is via IFUNC.  IFUNC resolvers fire before
     constructors, so explicitly call __builtin_cpu_init here.  */
  ifunc_cpu_init_stmt = gimple_build_call_vec (
		     ix86_builtins [(int) IX86_BUILTIN_CPU_INIT], vNULL);
  gimple_seq_add_stmt (&gseq, ifunc_cpu_init_stmt);
  gimple_set_bb (ifunc_cpu_init_stmt, *empty_bb);
  set_bb_seq (*empty_bb, gseq);

  pop_cfun ();

  for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
    {
      tree version_decl = ele;
      tree predicate_chain = NULL_TREE;
      unsigned int priority;
      /* Get attribute string, parse it and find the right predicate decl.
	 The predicate function could be a lengthy combination of many
	 features, like arch-type and various isa-variants.  */
      priority = get_builtin_code_for_version (version_decl,
					       &predicate_chain);

      if (predicate_chain == NULL_TREE)
	continue;

      function_version_info [ix - 1].version_decl = version_decl;
      function_version_info [ix - 1].predicate_chain = predicate_chain;
      function_version_info [ix - 1].dispatch_priority = priority;
      actual_versions++;
    }

  /* Sort the versions according to descending order of dispatch priority.  The
     priority is based on the ISA.  This is not a perfect solution.  There
     could still be ambiguity.  If more than one function version is suitable
     to execute, which one should be dispatched?  In future, allow the user
     to specify a dispatch priority next to the version.  */
  qsort (function_version_info, actual_versions,
	 sizeof (struct _function_version_info), feature_compare);

  for (i = 0; i < actual_versions; ++i)
    *empty_bb = add_condition_to_bb (dispatch_decl,
				     function_version_info[i].version_decl,
				     function_version_info[i].predicate_chain,
				     *empty_bb);

  /* dispatch default version at the end.  */
  *empty_bb = add_condition_to_bb (dispatch_decl, default_decl,
				   NULL, *empty_bb);

  free (function_version_info);
  return 0;
}
/* This function returns true if FN1 and FN2 are versions of the same function,
   that is, the targets of the function decls are different.  This assumes
   that FN1 and FN2 have the same signature.  */

static bool
ix86_function_versions (tree fn1, tree fn2)
{
  tree attr1, attr2;
  struct cl_target_option *target1, *target2;

  if (TREE_CODE (fn1) != FUNCTION_DECL
      || TREE_CODE (fn2) != FUNCTION_DECL)
    return false;

  attr1 = DECL_FUNCTION_SPECIFIC_TARGET (fn1);
  attr2 = DECL_FUNCTION_SPECIFIC_TARGET (fn2);

  /* At least one function decl should have the target attribute specified.  */
  if (attr1 == NULL_TREE && attr2 == NULL_TREE)
    return false;

  if (attr1 == NULL_TREE)
    attr1 = target_option_default_node;
  else if (attr2 == NULL_TREE)
    attr2 = target_option_default_node;

  target1 = TREE_TARGET_OPTION (attr1);
  target2 = TREE_TARGET_OPTION (attr2);

  /* target1 and target2 must be different in some way.  */
  if (target1->x_ix86_isa_flags == target2->x_ix86_isa_flags
      && target1->x_target_flags == target2->x_target_flags
      && target1->arch == target2->arch
      && target1->tune == target2->tune
      && target1->x_ix86_fpmath == target2->x_ix86_fpmath
      && target1->branch_cost == target2->branch_cost)
    return false;

  return true;
}
/* Comparator function to be used in qsort routine to sort attribute
   specification strings to "target".  */

static int
attr_strcmp (const void *v1, const void *v2)
{
  const char *c1 = *(char *const*)v1;
  const char *c2 = *(char *const*)v2;
  return strcmp (c1, c2);
}
/* STR is the argument to target attribute.  This function tokenizes
   the comma separated arguments, sorts them and returns a string which
   is a unique identifier for the comma separated arguments.  It also
   replaces non-identifier characters "=,-" with "_".  */

static char *
sorted_attr_string (const char *str)
{
  char **args = NULL;
  char *attr_str, *ret_str;
  char *attr = NULL;
  unsigned int argnum = 1;
  unsigned int i;

  for (i = 0; i < strlen (str); i++)
    if (str[i] == ',')
      argnum++;

  attr_str = (char *)xmalloc (strlen (str) + 1);
  strcpy (attr_str, str);

  /* Replace "=,-" with "_".  */
  for (i = 0; i < strlen (attr_str); i++)
    if (attr_str[i] == '=' || attr_str[i] == '-')
      attr_str[i] = '_';

  if (argnum == 1)
    return attr_str;

  args = XNEWVEC (char *, argnum);

  i = 0;
  attr = strtok (attr_str, ",");
  while (attr != NULL)
    {
      args[i] = attr;
      i++;
      attr = strtok (NULL, ",");
    }

  qsort (args, argnum, sizeof (char*), attr_strcmp);

  ret_str = (char *)xmalloc (strlen (str) + 1);
  strcpy (ret_str, args[0]);
  for (i = 1; i < argnum; i++)
    {
      strcat (ret_str, "_");
      strcat (ret_str, args[i]);
    }

  XDELETEVEC (args);
  XDELETE (attr_str);
  return ret_str;
}
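
/* Example (illustrative): "avx,arch=core2" is first rewritten to
   "avx,arch_core2", tokenized to { "avx", "arch_core2" }, sorted to
   { "arch_core2", "avx" }, and returned as "arch_core2_avx".  */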
/* This function changes the assembler name for functions that are
   versions.  If DECL is a function version and has a "target"
   attribute, it appends the attribute string to its assembler name.  */

static tree
ix86_mangle_function_version_assembler_name (tree decl, tree id)
{
  tree version_attr;
  const char *orig_name, *version_string, *attr_str;
  char *assembler_name;

  if (DECL_DECLARED_INLINE_P (decl)
      && lookup_attribute ("gnu_inline",
			   DECL_ATTRIBUTES (decl)))
    error_at (DECL_SOURCE_LOCATION (decl),
	      "Function versions cannot be marked as gnu_inline,"
	      " bodies have to be generated");

  if (DECL_VIRTUAL_P (decl)
      || DECL_VINDEX (decl))
    error_at (DECL_SOURCE_LOCATION (decl),
	      "Virtual function versioning not supported\n");

  version_attr = lookup_attribute ("target", DECL_ATTRIBUTES (decl));

  /* target attribute string is NULL for default functions.  */
  if (version_attr == NULL_TREE)
    return id;

  orig_name = IDENTIFIER_POINTER (id);
  version_string
    = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (version_attr)));

  attr_str = sorted_attr_string (version_string);
  assembler_name = (char *) xmalloc (strlen (orig_name)
				     + strlen (attr_str) + 2);

  sprintf (assembler_name, "%s.%s", orig_name, attr_str);

  /* Allow assembler name to be modified if already set.  */
  if (DECL_ASSEMBLER_NAME_SET_P (decl))
    SET_DECL_RTL (decl, NULL);

  return get_identifier (assembler_name);
}
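
/* Example (illustrative): a version of foo declared with
   target ("arch=core2") gets the assembler name "foo.arch_core2", since
   sorted_attr_string rewrites "arch=core2" to "arch_core2" before it is
   appended after a '.'.  */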
static tree
ix86_mangle_decl_assembler_name (tree decl, tree id)
{
  /* For function version, add the target suffix to the assembler name.  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_FUNCTION_VERSIONED (decl))
    id = ix86_mangle_function_version_assembler_name (decl, id);
#ifdef SUBTARGET_MANGLE_DECL_ASSEMBLER_NAME
  id = SUBTARGET_MANGLE_DECL_ASSEMBLER_NAME (decl, id);
#endif

  return id;
}
/* Return a new name by appending SUFFIX to the DECL name.  If MAKE_UNIQUE
   is true, append the full path name of the source file.  */

static char *
make_name (tree decl, const char *suffix, bool make_unique)
{
  char *global_var_name;
  int name_len;
  const char *name;
  const char *unique_name = NULL;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Get a unique name that can be used globally without any chances
     of collision at link time.  */
  if (make_unique)
    unique_name = IDENTIFIER_POINTER (get_file_function_name ("\0"));

  name_len = strlen (name) + strlen (suffix) + 2;

  if (make_unique)
    name_len += strlen (unique_name) + 1;

  global_var_name = XNEWVEC (char, name_len);

  /* Use '.' to concatenate names as it is demangler friendly.  */
  if (make_unique)
    snprintf (global_var_name, name_len, "%s.%s.%s", name,
	      unique_name, suffix);
  else
    snprintf (global_var_name, name_len, "%s.%s", name, suffix);

  return global_var_name;
}
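
/* Example (illustrative): for a public function foo,
   make_name (decl, "resolver", false) yields "foo.resolver"; with
   MAKE_UNIQUE set, the file-scope string from get_file_function_name is
   spliced in between, giving "foo.<unique>.resolver".  */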
#if defined (ASM_OUTPUT_TYPE_DIRECTIVE) && HAVE_GNU_INDIRECT_FUNCTION

/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Return the decl created.  */

static tree
make_dispatcher_decl (const tree decl)
{
  tree func_decl;
  char *func_name, *resolver_name;
  tree fn_type, func_type;
  bool is_uniq = false;

  if (TREE_PUBLIC (decl) == 0)
    is_uniq = true;

  func_name = make_name (decl, "ifunc", is_uniq);
  resolver_name = make_name (decl, "resolver", is_uniq);
  gcc_assert (resolver_name);

  fn_type = TREE_TYPE (decl);
  func_type = build_function_type (TREE_TYPE (fn_type),
				   TYPE_ARG_TYPES (fn_type));

  func_decl = build_fn_decl (func_name, func_type);
  TREE_USED (func_decl) = 1;
  DECL_CONTEXT (func_decl) = NULL_TREE;
  DECL_INITIAL (func_decl) = error_mark_node;
  DECL_ARTIFICIAL (func_decl) = 1;
  /* Mark this func as external, the resolver will flip it again if
     it gets generated.  */
  DECL_EXTERNAL (func_decl) = 1;
  /* IFUNCs have to be externally visible.  */
  TREE_PUBLIC (func_decl) = 1;

  return func_decl;
}

#endif
/* Returns true if DECL is multi-versioned and is the default function,
   that is, it is not tagged with target specific optimization.  */

static bool
is_function_default_version (const tree decl)
{
  return (TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_FUNCTION_VERSIONED (decl)
	  && DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL_TREE);
}
/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Returns the decl of the dispatcher function.  */

static tree
ix86_get_function_versions_dispatcher (void *decl)
{
  tree fn = (tree) decl;
  struct cgraph_node *node = NULL;
  struct cgraph_node *default_node = NULL;
  struct cgraph_function_version_info *node_v = NULL;
  struct cgraph_function_version_info *first_v = NULL;

  tree dispatch_decl = NULL;

#if defined (ASM_OUTPUT_TYPE_DIRECTIVE) && HAVE_GNU_INDIRECT_FUNCTION
  struct cgraph_function_version_info *it_v = NULL;
  struct cgraph_node *dispatcher_node = NULL;
  struct cgraph_function_version_info *dispatcher_version_info = NULL;
#endif

  struct cgraph_function_version_info *default_version_info = NULL;

  gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));

  node = cgraph_get_node (fn);
  gcc_assert (node != NULL);

  node_v = get_cgraph_node_version (node);
  gcc_assert (node_v != NULL);

  if (node_v->dispatcher_resolver != NULL)
    return node_v->dispatcher_resolver;

  /* Find the default version and make it the first node.  */
  first_v = node_v;
  /* Go to the beginning of the chain.  */
  while (first_v->prev != NULL)
    first_v = first_v->prev;
  default_version_info = first_v;
  while (default_version_info != NULL)
    {
      if (is_function_default_version
	    (default_version_info->this_node->symbol.decl))
	break;
      default_version_info = default_version_info->next;
    }

  /* If there is no default node, just return NULL.  */
  if (default_version_info == NULL)
    return NULL;

  /* Make default info the first node.  */
  if (first_v != default_version_info)
    {
      default_version_info->prev->next = default_version_info->next;
      if (default_version_info->next)
	default_version_info->next->prev = default_version_info->prev;
      first_v->prev = default_version_info;
      default_version_info->next = first_v;
      default_version_info->prev = NULL;
    }

  default_node = default_version_info->this_node;

#if defined (ASM_OUTPUT_TYPE_DIRECTIVE) && HAVE_GNU_INDIRECT_FUNCTION
  /* Right now, the dispatching is done via ifunc.  */
  dispatch_decl = make_dispatcher_decl (default_node->symbol.decl);

  dispatcher_node = cgraph_get_create_node (dispatch_decl);
  gcc_assert (dispatcher_node != NULL);
  dispatcher_node->dispatcher_function = 1;
  dispatcher_version_info
    = insert_new_cgraph_node_version (dispatcher_node);
  dispatcher_version_info->next = default_version_info;
  dispatcher_node->local.finalized = 1;

  /* Set the dispatcher for all the versions.  */
  it_v = default_version_info;
  while (it_v->next != NULL)
    {
      it_v->dispatcher_resolver = dispatch_decl;
      it_v = it_v->next;
    }
#else
  error_at (DECL_SOURCE_LOCATION (default_node->symbol.decl),
	    "multiversioning needs ifunc which is not supported "
	    "in this configuration");
#endif

  return dispatch_decl;
}
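
/* The source-level feature all of this serves (a sketch, assuming the
   documented function-multiversioning syntax):

     __attribute__ ((target ("default"))) int foo (void) { return 0; }
     __attribute__ ((target ("arch=corei7"))) int foo (void) { return 1; }

   The front end funnels calls to foo through the dispatcher returned
   here, which is resolved once at load time via IFUNC.  */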
/* Makes a function attribute of the form NAME(ARG_NAME) and chains
   it to CHAIN.  */

static tree
make_attribute (const char *name, const char *arg_name, tree chain)
{
  tree attr_name;
  tree attr_arg_name;
  tree attr_args;
  tree attr;

  attr_name = get_identifier (name);
  attr_arg_name = build_string (strlen (arg_name), arg_name);
  attr_args = tree_cons (NULL_TREE, attr_arg_name, NULL_TREE);
  attr = tree_cons (attr_name, attr_args, chain);
  return attr;
}
/* Make the resolver function decl to dispatch the versions of
   a multi-versioned function, DEFAULT_DECL.  Create an
   empty basic block in the resolver and store the pointer in
   EMPTY_BB.  Return the decl of the resolver function.  */

static tree
make_resolver_func (const tree default_decl,
		    const tree dispatch_decl,
		    basic_block *empty_bb)
{
  char *resolver_name;
  tree decl, type, decl_name, t;
  bool is_uniq = false;

  /* IFUNC's have to be globally visible.  So, if the default_decl is
     not, then the name of the IFUNC should be made unique.  */
  if (TREE_PUBLIC (default_decl) == 0)
    is_uniq = true;

  /* Append the filename to the resolver function if the versions are
     not externally visible.  This is because the resolver function has
     to be externally visible for the loader to find it.  So, appending
     the filename will prevent conflicts with a resolver function from
     another module which is based on the same version name.  */
  resolver_name = make_name (default_decl, "resolver", is_uniq);

  /* The resolver function should return a (void *).  */
  type = build_function_type_list (ptr_type_node, NULL_TREE);

  decl = build_fn_decl (resolver_name, type);
  decl_name = get_identifier (resolver_name);
  SET_DECL_ASSEMBLER_NAME (decl, decl_name);

  DECL_NAME (decl) = decl_name;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  /* IFUNC resolvers have to be externally visible.  */
  TREE_PUBLIC (decl) = 1;
  DECL_UNINLINABLE (decl) = 0;

  /* Resolver is not external, body is generated.  */
  DECL_EXTERNAL (decl) = 0;
  DECL_EXTERNAL (dispatch_decl) = 0;

  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  DECL_STATIC_CONSTRUCTOR (decl) = 0;

  if (DECL_COMDAT_GROUP (default_decl)
      || TREE_PUBLIC (default_decl))
    {
      /* In this case, each translation unit with a call to this
	 versioned function will put out a resolver.  Ensure it
	 is comdat to keep just one copy.  */
      DECL_COMDAT (decl) = 1;
      make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
    }
  /* Build result decl and add to function_decl.  */
  t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  gimplify_function_tree (decl);
  push_cfun (DECL_STRUCT_FUNCTION (decl));
  *empty_bb = init_lowered_empty_function (decl, false);

  cgraph_add_new_function (decl, true);
  cgraph_call_function_insertion_hooks (cgraph_get_create_node (decl));

  pop_cfun ();

  gcc_assert (dispatch_decl != NULL);
  /* Mark dispatch_decl as "ifunc" with resolver as resolver_name.  */
  DECL_ATTRIBUTES (dispatch_decl)
    = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));

  /* Create the alias for dispatch to resolver here.  */
  /*cgraph_create_function_alias (dispatch_decl, decl);*/
  cgraph_same_body_alias (NULL, dispatch_decl, decl);

  return decl;
}
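
/* Net effect (illustrative): the dispatcher ends up declared as if by

     extern int foo (void) __attribute__ ((ifunc ("foo.resolver")));

   with foo.resolver the comdat resolver whose body
   ix86_generate_version_dispatcher_body fills in below.  */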
/* Generate the dispatching code body to dispatch multi-versioned function
   DECL.  The target hook is called to process the "target" attributes and
   provide the code to dispatch the right function at run-time.  NODE points
   to the dispatcher decl whose body will be created.  */

static tree
ix86_generate_version_dispatcher_body (void *node_p)
{
  tree resolver_decl;
  basic_block empty_bb;
  vec<tree> fn_ver_vec = vNULL;
  tree default_ver_decl;
  struct cgraph_node *versn;
  struct cgraph_node *node;

  struct cgraph_function_version_info *node_version_info = NULL;
  struct cgraph_function_version_info *versn_info = NULL;

  node = (cgraph_node *)node_p;

  node_version_info = get_cgraph_node_version (node);
  gcc_assert (node->dispatcher_function
              && node_version_info != NULL);

  if (node_version_info->dispatcher_resolver)
    return node_version_info->dispatcher_resolver;

  /* The first version in the chain corresponds to the default version.  */
  default_ver_decl = node_version_info->next->this_node->symbol.decl;

  /* node is going to be an alias, so remove the finalized bit.  */
  node->local.finalized = false;

  resolver_decl = make_resolver_func (default_ver_decl,
                                      node->symbol.decl, &empty_bb);

  node_version_info->dispatcher_resolver = resolver_decl;

  push_cfun (DECL_STRUCT_FUNCTION (resolver_decl));

  fn_ver_vec.create (2);

  for (versn_info = node_version_info->next; versn_info;
       versn_info = versn_info->next)
    {
      versn = versn_info->this_node;
      /* Check for virtual functions here again, as by this time it should
         have been determined if this function needs a vtable index or
         not.  This happens for methods in derived classes that override
         virtual methods in base classes but are not explicitly marked as
         virtual.  */
      if (DECL_VINDEX (versn->symbol.decl))
        error_at (DECL_SOURCE_LOCATION (versn->symbol.decl),
                  "Virtual function multiversioning not supported");
      fn_ver_vec.safe_push (versn->symbol.decl);
    }

  dispatch_function_versions (resolver_decl, &fn_ver_vec, &empty_bb);

  rebuild_cgraph_edges ();
  pop_cfun ();
  return resolver_decl;
}
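
/* For illustration, a minimal user-level sketch (C++ front end) of the
   multiversioning this machinery supports; the version strings are
   examples only:

     __attribute__ ((target ("default"))) int foo () { return 0; }
     __attribute__ ((target ("sse4.2")))  int foo () { return 1; }
     __attribute__ ((target ("avx2")))    int foo () { return 2; }

   Calls to foo go through an ifunc whose resolver, created by
   make_resolver_func above and filled in by dispatch_function_versions,
   selects the best version for the running CPU at load time.  */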
/* This builds the processor_model struct type defined in
   libgcc/config/i386/cpuinfo.c  */

static tree
build_processor_model_struct (void)
{
  const char *field_name[] = {"__cpu_vendor", "__cpu_type", "__cpu_subtype",
                              "__cpu_features"};
  tree field = NULL_TREE, field_chain = NULL_TREE;
  int i;
  tree type = make_node (RECORD_TYPE);

  /* The first 3 fields are unsigned int.  */
  for (i = 0; i < 3; ++i)
    {
      field = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
                          get_identifier (field_name[i]), unsigned_type_node);
      if (field_chain != NULL_TREE)
        DECL_CHAIN (field) = field_chain;
      field_chain = field;
    }

  /* The last field is an array of unsigned integers of size one.  */
  field = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
                      get_identifier (field_name[3]),
                      build_array_type (unsigned_type_node,
                                        build_index_type (size_one_node)));
  if (field_chain != NULL_TREE)
    DECL_CHAIN (field) = field_chain;
  field_chain = field;

  finish_builtin_struct (type, "__processor_model", field_chain, NULL_TREE);
  return type;
}
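
/* For reference, the layout built above mirrors the definition in
   libgcc/config/i386/cpuinfo.c:

     struct __processor_model
     {
       unsigned int __cpu_vendor;
       unsigned int __cpu_type;
       unsigned int __cpu_subtype;
       unsigned int __cpu_features[1];
     };
*/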
/* Returns an extern, comdat VAR_DECL of type TYPE and name NAME.  */

static tree
make_var_decl (tree type, const char *name)
{
  tree new_decl;

  new_decl = build_decl (UNKNOWN_LOCATION,
                         VAR_DECL,
                         get_identifier(name),
                         type);

  DECL_EXTERNAL (new_decl) = 1;
  TREE_STATIC (new_decl) = 1;
  TREE_PUBLIC (new_decl) = 1;
  DECL_INITIAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 0;
  DECL_PRESERVE_P (new_decl) = 1;

  make_decl_one_only (new_decl, DECL_ASSEMBLER_NAME (new_decl));
  assemble_variable (new_decl, 0, 0, 0);

  return new_decl;
}
/* FNDECL is a __builtin_cpu_is or a __builtin_cpu_supports call that is folded
   into an integer defined in libgcc/config/i386/cpuinfo.c  */

static tree
fold_builtin_cpu (tree fndecl, tree *args)
{
  unsigned int i;
  enum ix86_builtins fn_code = (enum ix86_builtins)
                               DECL_FUNCTION_CODE (fndecl);
  tree param_string_cst = NULL;

  /* This is the order of bit-fields in __processor_features in cpuinfo.c  */
  enum processor_features
  {
    F_CMOV = 0,
    F_MMX,
    F_POPCNT,
    F_SSE,
    F_SSE2,
    F_SSE3,
    F_SSSE3,
    F_SSE4_1,
    F_SSE4_2,
    F_AVX,
    F_AVX2,
    F_MAX
  };

  /* These are the values for vendor types and cpu types and subtypes
     in cpuinfo.c.  Cpu types and subtypes should be subtracted by
     the corresponding start value.  */
  enum processor_model
  {
    M_INTEL = 1,
    M_AMD,
    M_CPU_TYPE_START,
    M_INTEL_ATOM,
    M_INTEL_CORE2,
    M_INTEL_COREI7,
    M_AMDFAM10H,
    M_AMDFAM15H,
    M_CPU_SUBTYPE_START,
    M_INTEL_COREI7_NEHALEM,
    M_INTEL_COREI7_WESTMERE,
    M_INTEL_COREI7_SANDYBRIDGE,
    M_AMDFAM10H_BARCELONA,
    M_AMDFAM10H_SHANGHAI,
    M_AMDFAM10H_ISTANBUL,
    M_AMDFAM15H_BDVER1,
    M_AMDFAM15H_BDVER2,
    M_AMDFAM15H_BDVER3
  };

  static struct _arch_names_table
    {
      const char *const name;
      const enum processor_model model;
    }
  const arch_names_table[] =
    {
      {"amd", M_AMD},
      {"intel", M_INTEL},
      {"atom", M_INTEL_ATOM},
      {"core2", M_INTEL_CORE2},
      {"corei7", M_INTEL_COREI7},
      {"nehalem", M_INTEL_COREI7_NEHALEM},
      {"westmere", M_INTEL_COREI7_WESTMERE},
      {"sandybridge", M_INTEL_COREI7_SANDYBRIDGE},
      {"amdfam10h", M_AMDFAM10H},
      {"barcelona", M_AMDFAM10H_BARCELONA},
      {"shanghai", M_AMDFAM10H_SHANGHAI},
      {"istanbul", M_AMDFAM10H_ISTANBUL},
      {"amdfam15h", M_AMDFAM15H},
      {"bdver1", M_AMDFAM15H_BDVER1},
      {"bdver2", M_AMDFAM15H_BDVER2},
      {"bdver3", M_AMDFAM15H_BDVER3},
    };

  static struct _isa_names_table
    {
      const char *const name;
      const enum processor_features feature;
    }
  const isa_names_table[] =
    {
      {"cmov",   F_CMOV},
      {"mmx",    F_MMX},
      {"popcnt", F_POPCNT},
      {"sse",    F_SSE},
      {"sse2",   F_SSE2},
      {"sse3",   F_SSE3},
      {"ssse3",  F_SSSE3},
      {"sse4.1", F_SSE4_1},
      {"sse4.2", F_SSE4_2},
      {"avx",    F_AVX},
      {"avx2",   F_AVX2}
    };

  static tree __processor_model_type = NULL_TREE;
  static tree __cpu_model_var = NULL_TREE;

  if (__processor_model_type == NULL_TREE)
    __processor_model_type = build_processor_model_struct ();

  if (__cpu_model_var == NULL_TREE)
    __cpu_model_var = make_var_decl (__processor_model_type,
                                     "__cpu_model");

  gcc_assert ((args != NULL) && (*args != NULL));

  param_string_cst = *args;
  while (param_string_cst
         && TREE_CODE (param_string_cst) != STRING_CST)
    {
      /* *args must be an expr that can contain other EXPRs leading to a
         STRING_CST.  */
      if (!EXPR_P (param_string_cst))
        {
          error ("Parameter to builtin must be a string constant or literal");
          return integer_zero_node;
        }
      param_string_cst = TREE_OPERAND (EXPR_CHECK (param_string_cst), 0);
    }

  gcc_assert (param_string_cst);

  if (fn_code == IX86_BUILTIN_CPU_IS)
    {
      tree ref;
      tree field;
      tree final;

      unsigned int field_val = 0;
      unsigned int NUM_ARCH_NAMES
        = sizeof (arch_names_table) / sizeof (struct _arch_names_table);

      for (i = 0; i < NUM_ARCH_NAMES; i++)
        if (strcmp (arch_names_table[i].name,
                    TREE_STRING_POINTER (param_string_cst)) == 0)
          break;

      if (i == NUM_ARCH_NAMES)
        {
          error ("Parameter to builtin not valid: %s",
                 TREE_STRING_POINTER (param_string_cst));
          return integer_zero_node;
        }

      field = TYPE_FIELDS (__processor_model_type);
      field_val = arch_names_table[i].model;

      /* CPU types are stored in the next field.  */
      if (field_val > M_CPU_TYPE_START
          && field_val < M_CPU_SUBTYPE_START)
        {
          field = DECL_CHAIN (field);
          field_val -= M_CPU_TYPE_START;
        }

      /* CPU subtypes are stored in the next field.  */
      if (field_val > M_CPU_SUBTYPE_START)
        {
          field = DECL_CHAIN ( DECL_CHAIN (field));
          field_val -= M_CPU_SUBTYPE_START;
        }

      /* Get the appropriate field in __cpu_model.  */
      ref = build3 (COMPONENT_REF, TREE_TYPE (field), __cpu_model_var,
                    field, NULL_TREE);

      /* Check the value.  */
      final = build2 (EQ_EXPR, unsigned_type_node, ref,
                      build_int_cstu (unsigned_type_node, field_val));
      return build1 (CONVERT_EXPR, integer_type_node, final);
    }
  else if (fn_code == IX86_BUILTIN_CPU_SUPPORTS)
    {
      tree ref;
      tree array_elt;
      tree field;
      tree final;

      unsigned int field_val = 0;
      unsigned int NUM_ISA_NAMES
        = sizeof (isa_names_table) / sizeof (struct _isa_names_table);

      for (i = 0; i < NUM_ISA_NAMES; i++)
        if (strcmp (isa_names_table[i].name,
                    TREE_STRING_POINTER (param_string_cst)) == 0)
          break;

      if (i == NUM_ISA_NAMES)
        {
          error ("Parameter to builtin not valid: %s",
                 TREE_STRING_POINTER (param_string_cst));
          return integer_zero_node;
        }

      field = TYPE_FIELDS (__processor_model_type);
      /* Get the last field, which is __cpu_features.  */
      while (DECL_CHAIN (field))
        field = DECL_CHAIN (field);

      /* Get the appropriate field: __cpu_model.__cpu_features  */
      ref = build3 (COMPONENT_REF, TREE_TYPE (field), __cpu_model_var,
                    field, NULL_TREE);

      /* Access the 0th element of __cpu_features array.  */
      array_elt = build4 (ARRAY_REF, unsigned_type_node, ref,
                          integer_zero_node, NULL_TREE, NULL_TREE);

      field_val = (1 << isa_names_table[i].feature);
      /* Return __cpu_model.__cpu_features[0] & field_val  */
      final = build2 (BIT_AND_EXPR, unsigned_type_node, array_elt,
                      build_int_cstu (unsigned_type_node, field_val));
      return build1 (CONVERT_EXPR, integer_type_node, final);
    }
  gcc_unreachable ();
}
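
/* For illustration, the folds produced above written as source-level
   equivalents (field and enum names as defined in this function):

     __builtin_cpu_is ("westmere")
       => (int) (__cpu_model.__cpu_subtype
                 == M_INTEL_COREI7_WESTMERE - M_CPU_SUBTYPE_START)

     __builtin_cpu_supports ("sse4.2")
       => (int) (__cpu_model.__cpu_features[0] & (1 << F_SSE4_2))
*/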
static tree
ix86_fold_builtin (tree fndecl, int n_args,
                   tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum ix86_builtins fn_code = (enum ix86_builtins)
                                   DECL_FUNCTION_CODE (fndecl);
      if (fn_code == IX86_BUILTIN_CPU_IS
          || fn_code == IX86_BUILTIN_CPU_SUPPORTS)
        {
          gcc_assert (n_args == 1);
          return fold_builtin_cpu (fndecl, args);
        }
    }

#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#endif

  return NULL_TREE;
}
/* Make builtins to detect cpu type and features supported.  NAME is
   the builtin name, CODE is the builtin code, and FTYPE is the function
   type of the builtin.  */

static void
make_cpu_type_builtin (const char* name, int code,
                       enum ix86_builtin_func_type ftype, bool is_const)
{
  tree decl;
  tree type;

  type = ix86_get_builtin_func_type (ftype);
  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
                               NULL, NULL_TREE);
  gcc_assert (decl != NULL_TREE);
  ix86_builtins[(int) code] = decl;
  TREE_READONLY (decl) = is_const;
}
/* Make builtins to get CPU type and features supported.  The created
   builtins are :

   __builtin_cpu_init (), to detect cpu type and features,
   __builtin_cpu_is ("<CPUNAME>"), to check if cpu is of type <CPUNAME>,
   __builtin_cpu_supports ("<FEATURE>"), to check if cpu supports <FEATURE>
   */

static void
ix86_init_platform_type_builtins (void)
{
  make_cpu_type_builtin ("__builtin_cpu_init", IX86_BUILTIN_CPU_INIT,
                         INT_FTYPE_VOID, false);
  make_cpu_type_builtin ("__builtin_cpu_is", IX86_BUILTIN_CPU_IS,
                         INT_FTYPE_PCCHAR, true);
  make_cpu_type_builtin ("__builtin_cpu_supports", IX86_BUILTIN_CPU_SUPPORTS,
                         INT_FTYPE_PCCHAR, true);
}
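
/* For illustration, typical user-level usage of the three builtins
   created above (a sketch; any name from the tables in fold_builtin_cpu
   works the same way):

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("corei7"))
       ...
     if (__builtin_cpu_supports ("popcnt"))
       ...

   __builtin_cpu_init must run once before the query builtins; libgcc
   also invokes the underlying __cpu_indicator_init from a constructor.  */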
/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
                                      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
                              NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
                              sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
                        BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
                        BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
                        BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
                        BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
                        BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
                        BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
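
/* For illustration, a sketch of the cross-ABI varargs these builtins
   enable on 64-bit targets (the va_list type name is the one registered
   for ms_va_list_type_node):

     void __attribute__ ((ms_abi)) f (int n, ...)
     {
       __builtin_ms_va_list ap;
       __builtin_ms_va_start (ap, n);
       ...
       __builtin_ms_va_end (ap);
     }
*/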
static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  lang_hooks.types.register_builtin_type (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  lang_hooks.types.register_builtin_type (float128_type_node, "__float128");

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}
static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* Builtins to get CPU type and features.  */
  ix86_init_platform_type_builtins ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
                     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
                     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* We will expand them to normal call if SSE isn't available since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
                            BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
                            BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;

  ix86_init_tm_builtins ();
  ix86_init_mmx_sse_builtins ();

  if (TARGET_LP64)
    ix86_init_builtins_va_builtins_abi ();

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
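
/* For illustration, the TFmode builtins registered above as a user
   sees them (a sketch):

     __float128 x = __builtin_infq ();
     __float128 y = __builtin_fabsq (x);
     __float128 z = __builtin_copysignq (y, x);

   When SSE is unavailable, fabsq and copysignq expand to calls to
   __fabstf2 and __copysigntf3 in libgcc, as noted above.  */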
/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}
/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!insn_data[icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);

  return target;
}
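
/* For illustration, one concrete instance of the binop path above (a
   sketch; builtin and pattern names are examples from bdesc_args):
   __builtin_ia32_paddw128 maps to CODE_FOR_addv8hi3, so

     __m128i sum = __builtin_ia32_paddw128 (a, b);

   expands by loading the two V8HImode operands into registers as the
   predicates demand and emitting the addv8hi3 pattern into a fresh
   V8HImode target.  */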
/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
                               enum ix86_builtin_func_type m_type,
                               enum rtx_code sub_code)
{
  rtx pat;
  int i;
  int nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct {
    rtx op;
    enum machine_mode mode;
  } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs - 1)
        {
          if (!insn_data[icode].operand[i + 1].predicate (op, mode))
            {
              enum insn_code new_icode = icode;
              switch (icode)
                {
                case CODE_FOR_xop_vpermil2v2df3:
                case CODE_FOR_xop_vpermil2v4sf3:
                case CODE_FOR_xop_vpermil2v4df3:
                case CODE_FOR_xop_vpermil2v8sf3:
                  error ("the last argument must be a 2-bit immediate");
                  return gen_reg_rtx (tmode);
                case CODE_FOR_xop_rotlv2di3:
                  new_icode = CODE_FOR_rotlv2di3;
                  goto xop_rotl;
                case CODE_FOR_xop_rotlv4si3:
                  new_icode = CODE_FOR_rotlv4si3;
                  goto xop_rotl;
                case CODE_FOR_xop_rotlv8hi3:
                  new_icode = CODE_FOR_rotlv8hi3;
                  goto xop_rotl;
                case CODE_FOR_xop_rotlv16qi3:
                  new_icode = CODE_FOR_rotlv16qi3;
                xop_rotl:
                  if (CONST_INT_P (op))
                    {
                      int mask = GET_MODE_BITSIZE (GET_MODE_INNER (tmode)) - 1;
                      op = GEN_INT (INTVAL (op) & mask);
                      gcc_checking_assert
                        (insn_data[icode].operand[i + 1].predicate (op, mode));
                    }
                  else
                    {
                      gcc_checking_assert
                        (nargs == 2
                         && insn_data[new_icode].operand[0].mode == tmode
                         && insn_data[new_icode].operand[1].mode == tmode
                         && insn_data[new_icode].operand[2].mode == mode
                         && insn_data[new_icode].operand[0].predicate
                            == insn_data[icode].operand[0].predicate
                         && insn_data[new_icode].operand[1].predicate
                            == insn_data[icode].operand[1].predicate);
                      icode = new_icode;
                    }
                  break;

                default:
                  gcc_unreachable ();
                }
            }
        }
      else
        {
          if (VECTOR_MODE_P (mode))
            op = safe_vector_operand (op, mode);

          /* If we aren't optimizing, only allow one memory operand to be
             generated.  */
          if (memory_operand (op, mode))
            num_memory++;

          gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

          if (optimize
              || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
              || num_memory > 1)
            op = force_reg (mode, op);
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
        pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
                               GEN_INT ((int)sub_code));
      else if (! comparison_p)
        pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
        {
          rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
                                       args[0].op,
                                       args[1].op);

          pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
        }
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
                             args[2].op, args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
                                    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (!insn_data[icode].operand[2].predicate (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comparison insns.  */

static rtx
ix86_expand_sse_compare (const struct builtin_description *d,
                         tree exp, rtx target, bool swap)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (swap)
    {
      rtx tmp = gen_reg_rtx (mode1);
      emit_move_insn (tmp, op1);
      op1 = op0;
      op0 = tmp;
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comi insns.  */

static rtx
ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
                      rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
    {
      rtx tmp = op1;
      op1 = op0;
      op0 = tmp;
    }

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
                          gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                          gen_rtx_fmt_ee (comparison, QImode,
                                          SET_DEST (pat),
                                          const0_rtx)));

  return SUBREG_REG (target);
}
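
/* For illustration (a sketch; the builtin is one of the bdesc_comi
   entries): with a and b of type __m128,

     int lt = __builtin_ia32_comilt (a, b);

   reaches this routine, which emits the comi pattern to set the flags
   and then materializes d->comparison as an int through the QImode
   strict-low-part setcc built above.  */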
/* Subroutines of ix86_expand_args_builtin to take care of round insns.  */

static rtx
ix86_expand_sse_round (const struct builtin_description *d, tree exp,
                       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;

  if (optimize || target == 0
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = GEN_INT (d->comparison);

  pat = GEN_FCN (d->icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
static rtx
ix86_expand_sse_round_vec_pack_sfix (const struct builtin_description *d,
                                     tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;

  if (optimize || target == 0
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  op0 = safe_vector_operand (op0, mode0);
  op1 = safe_vector_operand (op1, mode1);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = GEN_INT (d->comparison);

  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of ptest insns.  */

static rtx
ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
                       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
                          gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                          gen_rtx_fmt_ee (comparison, QImode,
                                          gen_rtx_REG (CCmode, FLAGS_REG),
                                          const0_rtx)));

  return SUBREG_REG (target);
}
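
/* For illustration (a sketch): __builtin_ia32_ptestz128 arrives here
   with comparison == EQ; the ptest pattern ANDs the operands into the
   flags register, and the setcc above turns ZF into the int result:

     int all_zero = __builtin_ia32_ptestz128 (mask, val);
*/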
/* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns.  */

static rtx
ix86_expand_sse_pcmpestr (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  tree arg4 = CALL_EXPR_ARG (exp, 4);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  rtx op4 = expand_normal (arg4);
  enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modei3 = insn_data[d->icode].operand[3].mode;
  modev4 = insn_data[d->icode].operand[4].mode;
  modei5 = insn_data[d->icode].operand[5].mode;
  modeimm = insn_data[d->icode].operand[6].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev4))
    op2 = safe_vector_operand (op2, modev4);

  if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
    op1 = copy_to_mode_reg (modei3, op1);
  if ((optimize && !register_operand (op2, modev4))
      || !insn_data[d->icode].operand[4].predicate (op2, modev4))
    op2 = copy_to_mode_reg (modev4, op2);
  if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
    op3 = copy_to_mode_reg (modei5, op3);

  if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
    {
      error ("the fifth argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPESTRI128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode0
          || !insn_data[d->icode].operand[0].predicate (target, tmode0))
        target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
    }
  else if (d->code == IX86_BUILTIN_PCMPESTRM128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode1
          || !insn_data[d->icode].operand[1].predicate (target, tmode1))
        target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
        (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                      gen_rtx_fmt_ee (EQ, QImode,
                                      gen_rtx_REG ((enum machine_mode) d->flag,
                                                   FLAGS_REG),
                                      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
/* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns.  */

static rtx
ix86_expand_sse_pcmpistr (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modev3 = insn_data[d->icode].operand[3].mode;
  modeimm = insn_data[d->icode].operand[4].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev3))
    op1 = safe_vector_operand (op1, modev3);

  if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if ((optimize && !register_operand (op1, modev3))
      || !insn_data[d->icode].operand[3].predicate (op1, modev3))
    op1 = copy_to_mode_reg (modev3, op1);

  if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
    {
      error ("the third argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPISTRI128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode0
          || !insn_data[d->icode].operand[0].predicate (target, tmode0))
        target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
    }
  else if (d->code == IX86_BUILTIN_PCMPISTRM128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode1
          || !insn_data[d->icode].operand[1].predicate (target, tmode1))
        target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
        (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                      gen_rtx_fmt_ee (EQ, QImode,
                                      gen_rtx_REG ((enum machine_mode) d->flag,
                                                   FLAGS_REG),
                                      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
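
/* For illustration, the smmintrin.h wrappers that funnel into the two
   string-compare expanders above (a sketch; 0x0c is an example mode
   byte):

     int     idx = _mm_cmpistri (a, b, 0x0c);
     __m128i msk = _mm_cmpistrm (a, b, 0x0c);
     int     zf  = _mm_cmpistrz (a, b, 0x0c);

   The index and mask forms select the target from operand 0 or 1; the
   flag forms use d->flag to pick the CC mode read by the final setcc.
   The mode byte must be an 8-bit constant, as the checks above
   enforce.  */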
/* Subroutine of ix86_expand_builtin to take care of insns with
   variable number of operands.  */

static rtx
ix86_expand_args_builtin (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat, real_target;
  unsigned int i, nargs;
  unsigned int nargs_constant = 0;
  int num_memory = 0;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[4];
  bool last_arg_count = false;
  enum insn_code icode = d->icode;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum machine_mode rmode = VOIDmode;
  bool swap = false;
  enum rtx_code comparison = d->comparison;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case V2DF_FTYPE_V2DF_ROUND:
    case V4DF_FTYPE_V4DF_ROUND:
    case V4SF_FTYPE_V4SF_ROUND:
    case V8SF_FTYPE_V8SF_ROUND:
    case V4SI_FTYPE_V4SF_ROUND:
    case V8SI_FTYPE_V8SF_ROUND:
      return ix86_expand_sse_round (d, exp, target);
    case V4SI_FTYPE_V2DF_V2DF_ROUND:
    case V8SI_FTYPE_V4DF_V4DF_ROUND:
      return ix86_expand_sse_round_vec_pack_sfix (d, exp, target);
    case INT_FTYPE_V8SF_V8SF_PTEST:
    case INT_FTYPE_V4DI_V4DI_PTEST:
    case INT_FTYPE_V4DF_V4DF_PTEST:
    case INT_FTYPE_V4SF_V4SF_PTEST:
    case INT_FTYPE_V2DI_V2DI_PTEST:
    case INT_FTYPE_V2DF_V2DF_PTEST:
      return ix86_expand_sse_ptest (d, exp, target);
    case FLOAT128_FTYPE_FLOAT128:
    case FLOAT_FTYPE_FLOAT:
    case INT_FTYPE_INT:
    case UINT64_FTYPE_INT:
    case UINT16_FTYPE_UINT16:
    case INT64_FTYPE_INT64:
    case INT64_FTYPE_V4SF:
    case INT64_FTYPE_V2DF:
    case INT_FTYPE_V16QI:
    case INT_FTYPE_V8QI:
    case INT_FTYPE_V8SF:
    case INT_FTYPE_V4DF:
    case INT_FTYPE_V4SF:
    case INT_FTYPE_V2DF:
    case INT_FTYPE_V32QI:
    case V16QI_FTYPE_V16QI:
    case V8SI_FTYPE_V8SF:
    case V8SI_FTYPE_V4SI:
    case V8HI_FTYPE_V8HI:
    case V8HI_FTYPE_V16QI:
    case V8QI_FTYPE_V8QI:
    case V8SF_FTYPE_V8SF:
    case V8SF_FTYPE_V8SI:
    case V8SF_FTYPE_V4SF:
    case V8SF_FTYPE_V8HI:
    case V4SI_FTYPE_V4SI:
    case V4SI_FTYPE_V16QI:
    case V4SI_FTYPE_V4SF:
    case V4SI_FTYPE_V8SI:
    case V4SI_FTYPE_V8HI:
    case V4SI_FTYPE_V4DF:
    case V4SI_FTYPE_V2DF:
    case V4HI_FTYPE_V4HI:
    case V4DF_FTYPE_V4DF:
    case V4DF_FTYPE_V4SI:
    case V4DF_FTYPE_V4SF:
    case V4DF_FTYPE_V2DF:
    case V4SF_FTYPE_V4SF:
    case V4SF_FTYPE_V4SI:
    case V4SF_FTYPE_V8SF:
    case V4SF_FTYPE_V4DF:
    case V4SF_FTYPE_V8HI:
    case V4SF_FTYPE_V2DF:
    case V2DI_FTYPE_V2DI:
    case V2DI_FTYPE_V16QI:
    case V2DI_FTYPE_V8HI:
    case V2DI_FTYPE_V4SI:
    case V2DF_FTYPE_V2DF:
    case V2DF_FTYPE_V4SI:
    case V2DF_FTYPE_V4DF:
    case V2DF_FTYPE_V4SF:
    case V2DF_FTYPE_V2SI:
    case V2SI_FTYPE_V2SI:
    case V2SI_FTYPE_V4SF:
    case V2SI_FTYPE_V2SF:
    case V2SI_FTYPE_V2DF:
    case V2SF_FTYPE_V2SF:
    case V2SF_FTYPE_V2SI:
    case V32QI_FTYPE_V32QI:
    case V32QI_FTYPE_V16QI:
    case V16HI_FTYPE_V16HI:
    case V16HI_FTYPE_V8HI:
    case V8SI_FTYPE_V8SI:
    case V16HI_FTYPE_V16QI:
    case V8SI_FTYPE_V16QI:
    case V4DI_FTYPE_V16QI:
    case V8SI_FTYPE_V8HI:
    case V4DI_FTYPE_V8HI:
    case V4DI_FTYPE_V4SI:
    case V4DI_FTYPE_V2DI:
      nargs = 1;
      break;
    case V4SF_FTYPE_V4SF_VEC_MERGE:
    case V2DF_FTYPE_V2DF_VEC_MERGE:
      return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
    case FLOAT128_FTYPE_FLOAT128_FLOAT128:
    case V16QI_FTYPE_V16QI_V16QI:
    case V16QI_FTYPE_V8HI_V8HI:
    case V8QI_FTYPE_V8QI_V8QI:
    case V8QI_FTYPE_V4HI_V4HI:
    case V8HI_FTYPE_V8HI_V8HI:
    case V8HI_FTYPE_V16QI_V16QI:
    case V8HI_FTYPE_V4SI_V4SI:
    case V8SF_FTYPE_V8SF_V8SF:
    case V8SF_FTYPE_V8SF_V8SI:
    case V4SI_FTYPE_V4SI_V4SI:
    case V4SI_FTYPE_V8HI_V8HI:
    case V4SI_FTYPE_V4SF_V4SF:
    case V4SI_FTYPE_V2DF_V2DF:
    case V4HI_FTYPE_V4HI_V4HI:
    case V4HI_FTYPE_V8QI_V8QI:
    case V4HI_FTYPE_V2SI_V2SI:
    case V4DF_FTYPE_V4DF_V4DF:
    case V4DF_FTYPE_V4DF_V4DI:
    case V4SF_FTYPE_V4SF_V4SF:
    case V4SF_FTYPE_V4SF_V4SI:
    case V4SF_FTYPE_V4SF_V2SI:
    case V4SF_FTYPE_V4SF_V2DF:
    case V4SF_FTYPE_V4SF_DI:
    case V4SF_FTYPE_V4SF_SI:
    case V2DI_FTYPE_V2DI_V2DI:
    case V2DI_FTYPE_V16QI_V16QI:
    case V2DI_FTYPE_V4SI_V4SI:
    case V2UDI_FTYPE_V4USI_V4USI:
    case V2DI_FTYPE_V2DI_V16QI:
    case V2DI_FTYPE_V2DF_V2DF:
    case V2SI_FTYPE_V2SI_V2SI:
    case V2SI_FTYPE_V4HI_V4HI:
    case V2SI_FTYPE_V2SF_V2SF:
    case V2DF_FTYPE_V2DF_V2DF:
    case V2DF_FTYPE_V2DF_V4SF:
    case V2DF_FTYPE_V2DF_V2DI:
    case V2DF_FTYPE_V2DF_DI:
    case V2DF_FTYPE_V2DF_SI:
    case V2SF_FTYPE_V2SF_V2SF:
    case V1DI_FTYPE_V1DI_V1DI:
    case V1DI_FTYPE_V8QI_V8QI:
    case V1DI_FTYPE_V2SI_V2SI:
    case V32QI_FTYPE_V16HI_V16HI:
    case V16HI_FTYPE_V8SI_V8SI:
    case V32QI_FTYPE_V32QI_V32QI:
    case V16HI_FTYPE_V32QI_V32QI:
    case V16HI_FTYPE_V16HI_V16HI:
    case V8SI_FTYPE_V4DF_V4DF:
    case V8SI_FTYPE_V8SI_V8SI:
    case V8SI_FTYPE_V16HI_V16HI:
    case V4DI_FTYPE_V4DI_V4DI:
    case V4DI_FTYPE_V8SI_V8SI:
    case V4UDI_FTYPE_V8USI_V8USI:
      if (comparison == UNKNOWN)
        return ix86_expand_binop_builtin (icode, exp, target);
      nargs = 2;
      break;
    case V4SF_FTYPE_V4SF_V4SF_SWAP:
    case V2DF_FTYPE_V2DF_V2DF_SWAP:
      gcc_assert (comparison != UNKNOWN);
      nargs = 2;
      swap = true;
      break;
    case V16HI_FTYPE_V16HI_V8HI_COUNT:
    case V16HI_FTYPE_V16HI_SI_COUNT:
    case V8SI_FTYPE_V8SI_V4SI_COUNT:
    case V8SI_FTYPE_V8SI_SI_COUNT:
    case V4DI_FTYPE_V4DI_V2DI_COUNT:
    case V4DI_FTYPE_V4DI_INT_COUNT:
    case V8HI_FTYPE_V8HI_V8HI_COUNT:
    case V8HI_FTYPE_V8HI_SI_COUNT:
    case V4SI_FTYPE_V4SI_V4SI_COUNT:
    case V4SI_FTYPE_V4SI_SI_COUNT:
    case V4HI_FTYPE_V4HI_V4HI_COUNT:
    case V4HI_FTYPE_V4HI_SI_COUNT:
    case V2DI_FTYPE_V2DI_V2DI_COUNT:
    case V2DI_FTYPE_V2DI_SI_COUNT:
    case V2SI_FTYPE_V2SI_V2SI_COUNT:
    case V2SI_FTYPE_V2SI_SI_COUNT:
    case V1DI_FTYPE_V1DI_V1DI_COUNT:
    case V1DI_FTYPE_V1DI_SI_COUNT:
      nargs = 2;
      last_arg_count = true;
      break;
    case UINT64_FTYPE_UINT64_UINT64:
    case UINT_FTYPE_UINT_UINT:
    case UINT_FTYPE_UINT_USHORT:
    case UINT_FTYPE_UINT_UCHAR:
    case UINT16_FTYPE_UINT16_INT:
    case UINT8_FTYPE_UINT8_INT:
      nargs = 2;
      break;
    case V2DI_FTYPE_V2DI_INT_CONVERT:
      nargs = 2;
      rmode = V1TImode;
      nargs_constant = 1;
      break;
    case V4DI_FTYPE_V4DI_INT_CONVERT:
      nargs = 2;
      rmode = V2TImode;
      nargs_constant = 1;
      break;
    case V8HI_FTYPE_V8HI_INT:
    case V8HI_FTYPE_V8SF_INT:
    case V8HI_FTYPE_V4SF_INT:
    case V8SF_FTYPE_V8SF_INT:
    case V4SI_FTYPE_V4SI_INT:
    case V4SI_FTYPE_V8SI_INT:
    case V4HI_FTYPE_V4HI_INT:
    case V4DF_FTYPE_V4DF_INT:
    case V4SF_FTYPE_V4SF_INT:
    case V4SF_FTYPE_V8SF_INT:
    case V2DI_FTYPE_V2DI_INT:
    case V2DF_FTYPE_V2DF_INT:
    case V2DF_FTYPE_V4DF_INT:
    case V16HI_FTYPE_V16HI_INT:
    case V8SI_FTYPE_V8SI_INT:
    case V4DI_FTYPE_V4DI_INT:
    case V2DI_FTYPE_V4DI_INT:
      nargs = 2;
      nargs_constant = 1;
      break;
    case V16QI_FTYPE_V16QI_V16QI_V16QI:
    case V8SF_FTYPE_V8SF_V8SF_V8SF:
    case V4DF_FTYPE_V4DF_V4DF_V4DF:
    case V4SF_FTYPE_V4SF_V4SF_V4SF:
    case V2DF_FTYPE_V2DF_V2DF_V2DF:
    case V32QI_FTYPE_V32QI_V32QI_V32QI:
      nargs = 3;
      break;
    case V32QI_FTYPE_V32QI_V32QI_INT:
    case V16HI_FTYPE_V16HI_V16HI_INT:
    case V16QI_FTYPE_V16QI_V16QI_INT:
    case V4DI_FTYPE_V4DI_V4DI_INT:
    case V8HI_FTYPE_V8HI_V8HI_INT:
    case V8SI_FTYPE_V8SI_V8SI_INT:
    case V8SI_FTYPE_V8SI_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_INT:
    case V8SF_FTYPE_V8SF_V4SF_INT:
    case V4SI_FTYPE_V4SI_V4SI_INT:
    case V4DF_FTYPE_V4DF_V4DF_INT:
    case V4DF_FTYPE_V4DF_V2DF_INT:
    case V4SF_FTYPE_V4SF_V4SF_INT:
    case V2DI_FTYPE_V2DI_V2DI_INT:
    case V4DI_FTYPE_V4DI_V2DI_INT:
    case V2DF_FTYPE_V2DF_V2DF_INT:
      nargs = 3;
      nargs_constant = 1;
      break;
    case V4DI_FTYPE_V4DI_V4DI_INT_CONVERT:
      nargs = 3;
      rmode = V4DImode;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
      nargs = 3;
      rmode = V2DImode;
      nargs_constant = 1;
      break;
    case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
      nargs = 3;
      rmode = DImode;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_UINT_UINT:
      nargs = 3;
      nargs_constant = 2;
      break;
    case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
    case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
    case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
      nargs = 4;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
      nargs = 4;
      nargs_constant = 2;
      break;
    case UCHAR_FTYPE_UCHAR_UINT_UINT_PUNSIGNED:
    case UCHAR_FTYPE_UCHAR_ULONGLONG_ULONGLONG_PULONGLONG:
      nargs = 4;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (comparison != UNKNOWN)
    {
      gcc_assert (nargs == 2);
      return ix86_expand_sse_compare (d, exp, target, swap);
    }

  if (rmode == VOIDmode || rmode == tmode)
    {
      if (optimize
          || target == 0
          || GET_MODE (target) != tmode
          || !insn_p->operand[0].predicate (target, tmode))
        target = gen_reg_rtx (tmode);
      real_target = target;
    }
  else
    {
      target = gen_reg_rtx (rmode);
      real_target = simplify_gen_subreg (tmode, target, rmode, 0);
    }

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match = insn_p->operand[i + 1].predicate (op, mode);

      if (last_arg_count && (i + 1) == nargs)
        {
          /* SIMD shift insns take either an 8-bit immediate or
             register as count.  But builtin functions take int as
             count.  If count doesn't match, we put it in register.  */
          if (!match)
            {
              op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
              if (!insn_p->operand[i + 1].predicate (op, mode))
                op = copy_to_reg (op);
            }
        }
      else if ((nargs - i) <= nargs_constant)
        {
          if (!match)
            switch (icode)
              {
              case CODE_FOR_avx2_inserti128:
              case CODE_FOR_avx2_extracti128:
                error ("the last argument must be an 1-bit immediate");
                return const0_rtx;

              case CODE_FOR_sse4_1_roundsd:
              case CODE_FOR_sse4_1_roundss:

              case CODE_FOR_sse4_1_roundpd:
              case CODE_FOR_sse4_1_roundps:
              case CODE_FOR_avx_roundpd256:
              case CODE_FOR_avx_roundps256:

              case CODE_FOR_sse4_1_roundpd_vec_pack_sfix:
              case CODE_FOR_sse4_1_roundps_sfix:
              case CODE_FOR_avx_roundpd_vec_pack_sfix256:
              case CODE_FOR_avx_roundps_sfix256:

              case CODE_FOR_sse4_1_blendps:
              case CODE_FOR_avx_blendpd256:
              case CODE_FOR_avx_vpermilv4df:
                error ("the last argument must be a 4-bit immediate");
                return const0_rtx;

              case CODE_FOR_sse4_1_blendpd:
              case CODE_FOR_avx_vpermilv2df:
              case CODE_FOR_xop_vpermil2v2df3:
              case CODE_FOR_xop_vpermil2v4sf3:
              case CODE_FOR_xop_vpermil2v4df3:
              case CODE_FOR_xop_vpermil2v8sf3:
                error ("the last argument must be a 2-bit immediate");
                return const0_rtx;

              case CODE_FOR_avx_vextractf128v4df:
              case CODE_FOR_avx_vextractf128v8sf:
              case CODE_FOR_avx_vextractf128v8si:
              case CODE_FOR_avx_vinsertf128v4df:
              case CODE_FOR_avx_vinsertf128v8sf:
              case CODE_FOR_avx_vinsertf128v8si:
                error ("the last argument must be a 1-bit immediate");
                return const0_rtx;

              case CODE_FOR_avx_vmcmpv2df3:
              case CODE_FOR_avx_vmcmpv4sf3:
              case CODE_FOR_avx_cmpv2df3:
              case CODE_FOR_avx_cmpv4sf3:
              case CODE_FOR_avx_cmpv4df3:
              case CODE_FOR_avx_cmpv8sf3:
                error ("the last argument must be a 5-bit immediate");
                return const0_rtx;

              default:
                switch (nargs_constant)
                  {
                  case 2:
                    if ((nargs - i) == nargs_constant)
                      {
                        error ("the next to last argument must be an 8-bit immediate");
                        break;
                      }
                  case 1:
                    error ("the last argument must be an 8-bit immediate");
                    break;
                  default:
                    gcc_unreachable ();
                  }
                return const0_rtx;
              }
        }
      else
        {
          if (VECTOR_MODE_P (mode))
            op = safe_vector_operand (op, mode);

          /* If we aren't optimizing, only allow one memory operand to
             be generated.  */
          if (memory_operand (op, mode))
            num_memory++;

          if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
            {
              if (optimize || !match || num_memory > 1)
                op = copy_to_mode_reg (mode, op);
            }
          else
            {
              op = copy_to_reg (op);
              op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
            }
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (real_target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
                             args[2].op);
      break;
    case 4:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
                             args[2].op, args[3].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of special insns
   with variable number of operands.  */

static rtx
ix86_expand_special_args_builtin (const struct builtin_description *d,
                                  tree exp, rtx target)
{
  tree arg;
  rtx pat, op;
  unsigned int i, nargs, arg_adjust, memory;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[3];
  enum insn_code icode = d->icode;
  bool last_arg_constant = false;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum { load, store } klass;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case VOID_FTYPE_VOID:
      emit_insn (GEN_FCN (icode) (target));
      return 0;
    case VOID_FTYPE_UINT64:
    case VOID_FTYPE_UNSIGNED:
      nargs = 0;
      klass = store;
      memory = 0;
      break;

    case INT_FTYPE_VOID:
    case UINT64_FTYPE_VOID:
    case UNSIGNED_FTYPE_VOID:
      nargs = 0;
      klass = load;
      memory = 0;
      break;
    case UINT64_FTYPE_PUNSIGNED:
    case V2DI_FTYPE_PV2DI:
    case V4DI_FTYPE_PV4DI:
    case V32QI_FTYPE_PCCHAR:
    case V16QI_FTYPE_PCCHAR:
    case V8SF_FTYPE_PCV4SF:
    case V8SF_FTYPE_PCFLOAT:
    case V4SF_FTYPE_PCFLOAT:
    case V4DF_FTYPE_PCV2DF:
    case V4DF_FTYPE_PCDOUBLE:
    case V2DF_FTYPE_PCDOUBLE:
    case VOID_FTYPE_PVOID:
      nargs = 1;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV2SF_V4SF:
    case VOID_FTYPE_PV4DI_V4DI:
    case VOID_FTYPE_PV2DI_V2DI:
    case VOID_FTYPE_PCHAR_V32QI:
    case VOID_FTYPE_PCHAR_V16QI:
    case VOID_FTYPE_PFLOAT_V8SF:
    case VOID_FTYPE_PFLOAT_V4SF:
    case VOID_FTYPE_PDOUBLE_V4DF:
    case VOID_FTYPE_PDOUBLE_V2DF:
    case VOID_FTYPE_PLONGLONG_LONGLONG:
    case VOID_FTYPE_PULONGLONG_ULONGLONG:
    case VOID_FTYPE_PINT_INT:
      nargs = 1;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case V4SF_FTYPE_V4SF_PCV2SF:
    case V2DF_FTYPE_V2DF_PCDOUBLE:
      nargs = 2;
      klass = load;
      memory = 1;
      break;
    case V8SF_FTYPE_PCV8SF_V8SI:
    case V4DF_FTYPE_PCV4DF_V4DI:
    case V4SF_FTYPE_PCV4SF_V4SI:
    case V2DF_FTYPE_PCV2DF_V2DI:
    case V8SI_FTYPE_PCV8SI_V8SI:
    case V4DI_FTYPE_PCV4DI_V4DI:
    case V4SI_FTYPE_PCV4SI_V4SI:
    case V2DI_FTYPE_PCV2DI_V2DI:
      nargs = 2;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV8SF_V8SI_V8SF:
    case VOID_FTYPE_PV4DF_V4DI_V4DF:
    case VOID_FTYPE_PV4SF_V4SI_V4SF:
    case VOID_FTYPE_PV2DF_V2DI_V2DF:
    case VOID_FTYPE_PV8SI_V8SI_V8SI:
    case VOID_FTYPE_PV4DI_V4DI_V4DI:
    case VOID_FTYPE_PV4SI_V4SI_V4SI:
    case VOID_FTYPE_PV2DI_V2DI_V2DI:
      nargs = 2;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case VOID_FTYPE_UINT_UINT_UINT:
    case VOID_FTYPE_UINT64_UINT_UINT:
    case UCHAR_FTYPE_UINT_UINT_UINT:
    case UCHAR_FTYPE_UINT64_UINT_UINT:
      nargs = 3;
      klass = load;
      memory = ARRAY_SIZE (args);
      last_arg_constant = true;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (klass == store)
    {
      arg = CALL_EXPR_ARG (exp, 0);
      op = expand_normal (arg);
      gcc_assert (target == 0);
      if (memory)
        {
          if (GET_MODE (op) != Pmode)
            op = convert_to_mode (Pmode, op, 1);
          target = gen_rtx_MEM (tmode, force_reg (Pmode, op));
        }
      else
        target = force_reg (tmode, op);
      arg_adjust = 1;
    }
  else
    {
      arg_adjust = 0;
      if (optimize
          || target == 0
          || !register_operand (target, tmode)
          || GET_MODE (target) != tmode)
        target = gen_reg_rtx (tmode);
    }

  for (i = 0; i < nargs; i++)
    {
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match;

      arg = CALL_EXPR_ARG (exp, i + arg_adjust);
      op = expand_normal (arg);
      match = insn_p->operand[i + 1].predicate (op, mode);

      if (last_arg_constant && (i + 1) == nargs)
        {
          if (!match)
            {
              if (icode == CODE_FOR_lwp_lwpvalsi3
                  || icode == CODE_FOR_lwp_lwpinssi3
                  || icode == CODE_FOR_lwp_lwpvaldi3
                  || icode == CODE_FOR_lwp_lwpinsdi3)
                error ("the last argument must be a 32-bit immediate");
              else
                error ("the last argument must be an 8-bit immediate");
              return const0_rtx;
            }
        }
      else
        {
          if (i == memory)
            {
              /* This must be the memory operand.  */
              if (GET_MODE (op) != Pmode)
                op = convert_to_mode (Pmode, op, 1);
              op = gen_rtx_MEM (mode, force_reg (Pmode, op));
              gcc_assert (GET_MODE (op) == mode
                          || GET_MODE (op) == VOIDmode);
            }
          else
            {
              /* This must be register.  */
              if (VECTOR_MODE_P (mode))
                op = safe_vector_operand (op, mode);

              gcc_assert (GET_MODE (op) == mode
                          || GET_MODE (op) == VOIDmode);
              op = copy_to_mode_reg (mode, op);
            }
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;
  emit_insn (pat);
  return klass == store ? 0 : target;
}
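
/* For illustration, two concrete instances of the special-args paths
   above (a sketch; names are examples from bdesc_special_args): the
   load-class __builtin_ia32_loadupd (V2DF_FTYPE_PCDOUBLE) wraps its
   pointer argument in a MEM and expands movupd into a fresh register,
   while the store-class __builtin_ia32_storeupd
   (VOID_FTYPE_PDOUBLE_V2DF) uses the pointer operand as the insn's
   destination and returns no value:

     __m128d v = __builtin_ia32_loadupd (p);
     __builtin_ia32_storeupd (p, v);
*/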
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static unsigned int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_init.  We DO have language-level syntax for this, in
   the form of (type){ init-list }.  Except that since we can't place emms
   instructions from inside the compiler, we can't allow the use of MMX
   registers unless the user explicitly asks for it.  So we do *not* define
   vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md.  Instead
   we have builtins invoked by mmintrin.h that gives us license to emit
   these sorts of instructions.  */

static rtx
ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (call_expr_nargs (exp) == n_elt);

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_extract.  They would be redundant (for non-MMX) if we
   had a language-level syntax for referencing vector elements.  */

static rtx
ix86_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_extract (true, target, op0, elt);

  return target;
}
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_set.  They would be redundant (for non-MMX) if we had
   a language-level syntax for referencing vector elements.  */

static rtx
ix86_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1, target;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  /* OP0 is the source of these builtin functions and shouldn't be
     modified.  Create a copy, use it and return it as target.  */
  target = gen_reg_rtx (tmode);
  emit_move_insn (target, op0);
  ix86_expand_vector_set (true, target, op1, elt);

  return target;
}
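
/* For illustration, the mmintrin-level builtins that reach the three
   vector helpers above (a sketch; exact builtin names vary by mode):

     __m64 a = __builtin_ia32_vec_init_v4hi (h0, h1, h2, h3);
     short e = __builtin_ia32_vec_ext_v4hi (a, 2);
     __m64 b = __builtin_ia32_vec_set_v4hi (a, 99, 2);

   The element selector goes through get_element_number, so it must be
   an integer constant within the vector's subparts.  */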
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     enum machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2, arg3, arg4;
  rtx op0, op1, op2, op3, op4, pat, insn;
  enum machine_mode mode0, mode1, mode2, mode3, mode4;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  /* For CPU builtins that can be folded, fold first and expand the fold.  */
  switch (fcode)
    {
    case IX86_BUILTIN_CPU_INIT:
      {
        /* Make it call __cpu_indicator_init in libgcc.  */
        tree call_expr, fndecl, type;
        type = build_function_type_list (integer_type_node, NULL_TREE);
        fndecl = build_fn_decl ("__cpu_indicator_init", type);
        call_expr = build_call_expr (fndecl, 0);
        return expand_expr (call_expr, target, mode, EXPAND_NORMAL);
      }
    case IX86_BUILTIN_CPU_IS:
    case IX86_BUILTIN_CPU_SUPPORTS:
      {
        tree arg0 = CALL_EXPR_ARG (exp, 0);
        tree fold_expr = fold_builtin_cpu (fndecl, &arg0);
        gcc_assert (fold_expr != NULL_TREE);
        return expand_expr (fold_expr, target, mode, EXPAND_NORMAL);
      }
    default:
      break;
    }
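/* For illustration: user code such as

     if (__builtin_cpu_supports ("avx"))
       ...

   is folded by fold_builtin_cpu into a test of the __cpu_model data that
   __cpu_indicator_init fills in at program startup.  */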
  /* Determine whether the builtin function is available under the current ISA.
     Originally the builtin was not created if it wasn't applicable to the
     current ISA based on the command line switches.  With function specific
     options, we need to check in the context of the function making the call
     whether it is supported.  */
  if (ix86_builtins_isa[fcode].isa
      && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
    {
      char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
                                       NULL, (enum fpmath_unit) 0, false);

      if (!opts)
        error ("%qE needs unknown isa option", fndecl);
      else
        {
          gcc_assert (opts != NULL);
          error ("%qE needs isa option %s", fndecl, opts);
          free (opts);
        }
      return const0_rtx;
    }
  switch (fcode)
    {
    case IX86_BUILTIN_MASKMOVQ:
    case IX86_BUILTIN_MASKMOVDQU:
      icode = (fcode == IX86_BUILTIN_MASKMOVQ
               ? CODE_FOR_mmx_maskmovq
               : CODE_FOR_sse2_maskmovdqu);
      /* Note the arg order is different from the operand order.  */
      arg1 = CALL_EXPR_ARG (exp, 0);
      arg2 = CALL_EXPR_ARG (exp, 1);
      arg0 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      mode0 = insn_data[icode].operand[0].mode;
      mode1 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[2].mode;

      if (GET_MODE (op0) != Pmode)
        op0 = convert_to_mode (Pmode, op0, 1);
      op0 = gen_rtx_MEM (mode1, force_reg (Pmode, op0));

      if (!insn_data[icode].operand[0].predicate (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
      if (!insn_data[icode].operand[1].predicate (op1, mode1))
        op1 = copy_to_mode_reg (mode1, op1);
      if (!insn_data[icode].operand[2].predicate (op2, mode2))
        op2 = copy_to_mode_reg (mode2, op2);
      pat = GEN_FCN (icode) (op0, op1, op2);
      if (!pat)
        return 0;
      emit_insn (pat);
      return 0;
    case IX86_BUILTIN_LDMXCSR:
      op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
      target = assign_386_stack_local (SImode, SLOT_TEMP);
      emit_move_insn (target, op0);
      emit_insn (gen_sse_ldmxcsr (target));
      return 0;

    case IX86_BUILTIN_STMXCSR:
      target = assign_386_stack_local (SImode, SLOT_TEMP);
      emit_insn (gen_sse_stmxcsr (target));
      return copy_to_mode_reg (SImode, target);
    case IX86_BUILTIN_CLFLUSH:
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      icode = CODE_FOR_sse2_clflush;
      if (!insn_data[icode].operand[0].predicate (op0, Pmode))
        {
          if (GET_MODE (op0) != Pmode)
            op0 = convert_to_mode (Pmode, op0, 1);
          op0 = force_reg (Pmode, op0);
        }

      emit_insn (gen_sse2_clflush (op0));
      return 0;
    case IX86_BUILTIN_MONITOR:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      if (!REG_P (op0))
        {
          if (GET_MODE (op0) != Pmode)
            op0 = convert_to_mode (Pmode, op0, 1);
          op0 = force_reg (Pmode, op0);
        }
      if (!REG_P (op1))
        op1 = copy_to_mode_reg (SImode, op1);
      if (!REG_P (op2))
        op2 = copy_to_mode_reg (SImode, op2);
      emit_insn (ix86_gen_monitor (op0, op1, op2));
      return 0;
    case IX86_BUILTIN_MWAIT:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      if (!REG_P (op0))
        op0 = copy_to_mode_reg (SImode, op0);
      if (!REG_P (op1))
        op1 = copy_to_mode_reg (SImode, op1);
      emit_insn (gen_sse3_mwait (op0, op1));
      return 0;
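/* For illustration: the pmmintrin.h pair

     _mm_monitor (addr, 0, 0);
     _mm_mwait (0, 0);

   expands through the two cases above, with the operands forced into the
   registers the monitor/mwait patterns expect.  */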
    case IX86_BUILTIN_VEC_INIT_V2SI:
    case IX86_BUILTIN_VEC_INIT_V4HI:
    case IX86_BUILTIN_VEC_INIT_V8QI:
      return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case IX86_BUILTIN_VEC_EXT_V2DF:
    case IX86_BUILTIN_VEC_EXT_V2DI:
    case IX86_BUILTIN_VEC_EXT_V4SF:
    case IX86_BUILTIN_VEC_EXT_V4SI:
    case IX86_BUILTIN_VEC_EXT_V8HI:
    case IX86_BUILTIN_VEC_EXT_V2SI:
    case IX86_BUILTIN_VEC_EXT_V4HI:
    case IX86_BUILTIN_VEC_EXT_V16QI:
      return ix86_expand_vec_ext_builtin (exp, target);

    case IX86_BUILTIN_VEC_SET_V2DI:
    case IX86_BUILTIN_VEC_SET_V4SF:
    case IX86_BUILTIN_VEC_SET_V4SI:
    case IX86_BUILTIN_VEC_SET_V8HI:
    case IX86_BUILTIN_VEC_SET_V4HI:
    case IX86_BUILTIN_VEC_SET_V16QI:
      return ix86_expand_vec_set_builtin (exp);
    case IX86_BUILTIN_INFQ:
    case IX86_BUILTIN_HUGE_VALQ:
      {
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);

        tmp = validize_mem (force_const_mem (mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (mode);

        emit_move_insn (target, tmp);
        return target;
      }
    case IX86_BUILTIN_RDPMC:
    case IX86_BUILTIN_RDTSC:
    case IX86_BUILTIN_RDTSCP:

      op0 = gen_reg_rtx (DImode);
      op1 = gen_reg_rtx (DImode);

      if (fcode == IX86_BUILTIN_RDPMC)
        {
          arg0 = CALL_EXPR_ARG (exp, 0);
          op2 = expand_normal (arg0);
          if (!register_operand (op2, SImode))
            op2 = copy_to_mode_reg (SImode, op2);

          insn = (TARGET_64BIT
                  ? gen_rdpmc_rex64 (op0, op1, op2)
                  : gen_rdpmc (op0, op2));
          emit_insn (insn);
        }
      else if (fcode == IX86_BUILTIN_RDTSC)
        {
          insn = (TARGET_64BIT
                  ? gen_rdtsc_rex64 (op0, op1)
                  : gen_rdtsc (op0));
          emit_insn (insn);
        }
      else
        {
          op2 = gen_reg_rtx (SImode);

          insn = (TARGET_64BIT
                  ? gen_rdtscp_rex64 (op0, op1, op2)
                  : gen_rdtscp (op0, op2));
          emit_insn (insn);

          arg0 = CALL_EXPR_ARG (exp, 0);
          op4 = expand_normal (arg0);
          if (!address_operand (op4, VOIDmode))
            {
              op4 = convert_memory_address (Pmode, op4);
              op4 = copy_addr_to_reg (op4);
            }
          emit_move_insn (gen_rtx_MEM (SImode, op4), op2);
        }

      if (target == 0)
        target = gen_reg_rtx (mode);

      if (TARGET_64BIT)
        {
          op1 = expand_simple_binop (DImode, ASHIFT, op1, GEN_INT (32),
                                     op1, 1, OPTAB_DIRECT);
          op0 = expand_simple_binop (DImode, IOR, op0, op1,
                                     op0, 1, OPTAB_DIRECT);
        }

      emit_move_insn (target, op0);
      return target;
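/* For illustration: on 64-bit targets the counter comes back as two 32-bit
   halves, so the code above recombines them as

     result = (hi << 32) | lo;

   using an ASHIFT followed by an IOR in DImode.  */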
    case IX86_BUILTIN_FXSAVE:
    case IX86_BUILTIN_FXRSTOR:
    case IX86_BUILTIN_FXSAVE64:
    case IX86_BUILTIN_FXRSTOR64:
      switch (fcode)
        {
        case IX86_BUILTIN_FXSAVE:
          icode = CODE_FOR_fxsave;
          break;
        case IX86_BUILTIN_FXRSTOR:
          icode = CODE_FOR_fxrstor;
          break;
        case IX86_BUILTIN_FXSAVE64:
          icode = CODE_FOR_fxsave64;
          break;
        case IX86_BUILTIN_FXRSTOR64:
          icode = CODE_FOR_fxrstor64;
          break;
        default:
          gcc_unreachable ();
        }

      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);

      if (!address_operand (op0, VOIDmode))
        {
          op0 = convert_memory_address (Pmode, op0);
          op0 = copy_addr_to_reg (op0);
        }
      op0 = gen_rtx_MEM (BLKmode, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
        emit_insn (pat);
      return 0;
    case IX86_BUILTIN_XSAVE:
    case IX86_BUILTIN_XRSTOR:
    case IX86_BUILTIN_XSAVE64:
    case IX86_BUILTIN_XRSTOR64:
    case IX86_BUILTIN_XSAVEOPT:
    case IX86_BUILTIN_XSAVEOPT64:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);

      if (!address_operand (op0, VOIDmode))
        {
          op0 = convert_memory_address (Pmode, op0);
          op0 = copy_addr_to_reg (op0);
        }
      op0 = gen_rtx_MEM (BLKmode, op0);

      op1 = force_reg (DImode, op1);

      if (TARGET_64BIT)
        {
          op2 = expand_simple_binop (DImode, LSHIFTRT, op1, GEN_INT (32),
                                     NULL, 1, OPTAB_DIRECT);
          switch (fcode)
            {
            case IX86_BUILTIN_XSAVE:
              icode = CODE_FOR_xsave_rex64;
              break;
            case IX86_BUILTIN_XRSTOR:
              icode = CODE_FOR_xrstor_rex64;
              break;
            case IX86_BUILTIN_XSAVE64:
              icode = CODE_FOR_xsave64;
              break;
            case IX86_BUILTIN_XRSTOR64:
              icode = CODE_FOR_xrstor64;
              break;
            case IX86_BUILTIN_XSAVEOPT:
              icode = CODE_FOR_xsaveopt_rex64;
              break;
            case IX86_BUILTIN_XSAVEOPT64:
              icode = CODE_FOR_xsaveopt64;
              break;
            default:
              gcc_unreachable ();
            }

          op2 = gen_lowpart (SImode, op2);
          op1 = gen_lowpart (SImode, op1);
          pat = GEN_FCN (icode) (op0, op1, op2);
        }
      else
        {
          switch (fcode)
            {
            case IX86_BUILTIN_XSAVE:
              icode = CODE_FOR_xsave;
              break;
            case IX86_BUILTIN_XRSTOR:
              icode = CODE_FOR_xrstor;
              break;
            case IX86_BUILTIN_XSAVEOPT:
              icode = CODE_FOR_xsaveopt;
              break;
            default:
              gcc_unreachable ();
            }
          pat = GEN_FCN (icode) (op0, op1);
        }

      if (pat)
        emit_insn (pat);
      return 0;
    case IX86_BUILTIN_LLWPCB:
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      icode = CODE_FOR_lwp_llwpcb;
      if (!insn_data[icode].operand[0].predicate (op0, Pmode))
        {
          if (GET_MODE (op0) != Pmode)
            op0 = convert_to_mode (Pmode, op0, 1);
          op0 = force_reg (Pmode, op0);
        }
      emit_insn (gen_lwp_llwpcb (op0));
      return 0;

    case IX86_BUILTIN_SLWPCB:
      icode = CODE_FOR_lwp_slwpcb;
      if (!target
          || !insn_data[icode].operand[0].predicate (target, Pmode))
        target = gen_reg_rtx (Pmode);
      emit_insn (gen_lwp_slwpcb (target));
      return target;
    case IX86_BUILTIN_BEXTRI32:
    case IX86_BUILTIN_BEXTRI64:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      icode = (fcode == IX86_BUILTIN_BEXTRI32
               ? CODE_FOR_tbm_bextri_si
               : CODE_FOR_tbm_bextri_di);
      if (!CONST_INT_P (op1))
        {
          error ("last argument must be an immediate");
          return const0_rtx;
        }
      else
        {
          unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
          unsigned char lsb_index = INTVAL (op1) & 0xFF;
          op1 = GEN_INT (length);
          op2 = GEN_INT (lsb_index);
          pat = GEN_FCN (icode) (target, op0, op1, op2);
          if (pat)
            emit_insn (pat);
          return target;
        }
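/* For illustration: the BEXTRI immediate packs the field description as
   bits [7:0] = starting bit and bits [15:8] = field length, so, assuming
   the tbmintrin.h wrapper, __bextri_u32 (x, 0x0804) extracts an 8-bit
   field starting at bit 4.  */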
    case IX86_BUILTIN_RDRAND16_STEP:
      icode = CODE_FOR_rdrandhi_1;
      mode0 = HImode;
      goto rdrand_step;

    case IX86_BUILTIN_RDRAND32_STEP:
      icode = CODE_FOR_rdrandsi_1;
      mode0 = SImode;
      goto rdrand_step;

    case IX86_BUILTIN_RDRAND64_STEP:
      icode = CODE_FOR_rdranddi_1;
      mode0 = DImode;

    rdrand_step:
      op0 = gen_reg_rtx (mode0);
      emit_insn (GEN_FCN (icode) (op0));

      arg0 = CALL_EXPR_ARG (exp, 0);
      op1 = expand_normal (arg0);
      if (!address_operand (op1, VOIDmode))
        {
          op1 = convert_memory_address (Pmode, op1);
          op1 = copy_addr_to_reg (op1);
        }
      emit_move_insn (gen_rtx_MEM (mode0, op1), op0);

      op1 = gen_reg_rtx (SImode);
      emit_move_insn (op1, CONST1_RTX (SImode));

      /* Emit SImode conditional move.  */
      if (mode0 == HImode)
        {
          op2 = gen_reg_rtx (SImode);
          emit_insn (gen_zero_extendhisi2 (op2, op0));
        }
      else if (mode0 == SImode)
        op2 = op0;
      else
        op2 = gen_rtx_SUBREG (SImode, op0, 0);

      if (target == 0)
        target = gen_reg_rtx (SImode);

      pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
                         const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
      return target;
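/* For illustration: this implements the <immintrin.h> protocol

     unsigned int val;
     int ok = _rdrand32_step (&val);

   the conditional move yields 1 when CF reports success; on failure the
   hardware leaves the destination zeroed, which the GEU branch reuses as
   the 0 return value.  */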
    case IX86_BUILTIN_RDSEED16_STEP:
      icode = CODE_FOR_rdseedhi_1;
      mode0 = HImode;
      goto rdseed_step;

    case IX86_BUILTIN_RDSEED32_STEP:
      icode = CODE_FOR_rdseedsi_1;
      mode0 = SImode;
      goto rdseed_step;

    case IX86_BUILTIN_RDSEED64_STEP:
      icode = CODE_FOR_rdseeddi_1;
      mode0 = DImode;

    rdseed_step:
      op0 = gen_reg_rtx (mode0);
      emit_insn (GEN_FCN (icode) (op0));

      arg0 = CALL_EXPR_ARG (exp, 0);
      op1 = expand_normal (arg0);
      if (!address_operand (op1, VOIDmode))
        {
          op1 = convert_memory_address (Pmode, op1);
          op1 = copy_addr_to_reg (op1);
        }
      emit_move_insn (gen_rtx_MEM (mode0, op1), op0);

      op2 = gen_reg_rtx (QImode);

      pat = gen_rtx_LTU (QImode, gen_rtx_REG (CCCmode, FLAGS_REG),
                         const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, op2, pat));

      if (target == 0)
        target = gen_reg_rtx (SImode);

      emit_insn (gen_zero_extendqisi2 (target, op2));
      return target;
    case IX86_BUILTIN_ADDCARRYX32:
      icode = TARGET_ADX ? CODE_FOR_adcxsi3 : CODE_FOR_addsi3_carry;
      mode0 = SImode;
      goto addcarryx;

    case IX86_BUILTIN_ADDCARRYX64:
      icode = TARGET_ADX ? CODE_FOR_adcxdi3 : CODE_FOR_adddi3_carry;
      mode0 = DImode;

    addcarryx:
      arg0 = CALL_EXPR_ARG (exp, 0); /* unsigned char c_in.  */
      arg1 = CALL_EXPR_ARG (exp, 1); /* unsigned int src1.  */
      arg2 = CALL_EXPR_ARG (exp, 2); /* unsigned int src2.  */
      arg3 = CALL_EXPR_ARG (exp, 3); /* unsigned int *sum_out.  */

      op0 = gen_reg_rtx (QImode);

      /* Generate CF from input operand.  */
      op1 = expand_normal (arg0);
      if (GET_MODE (op1) != QImode)
        op1 = convert_to_mode (QImode, op1, 1);
      op1 = copy_to_mode_reg (QImode, op1);
      emit_insn (gen_addqi3_cc (op0, op1, constm1_rtx));

      /* Gen ADCX instruction to compute X+Y+CF.  */
      op2 = expand_normal (arg1);
      op3 = expand_normal (arg2);
      if (!REG_P (op2))
        op2 = copy_to_mode_reg (mode0, op2);
      if (!REG_P (op3))
        op3 = copy_to_mode_reg (mode0, op3);

      op0 = gen_reg_rtx (mode0);

      op4 = gen_rtx_REG (CCCmode, FLAGS_REG);
      pat = gen_rtx_LTU (VOIDmode, op4, const0_rtx);
      emit_insn (GEN_FCN (icode) (op0, op2, op3, op4, pat));

      /* Store the result.  */
      op4 = expand_normal (arg3);
      if (!address_operand (op4, VOIDmode))
        {
          op4 = convert_memory_address (Pmode, op4);
          op4 = copy_addr_to_reg (op4);
        }
      emit_move_insn (gen_rtx_MEM (mode0, op4), op0);

      /* Return current CF value.  */
      if (target == 0)
        target = gen_reg_rtx (QImode);

      PUT_MODE (pat, QImode);
      emit_insn (gen_rtx_SET (VOIDmode, target, pat));
      return target;
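/* For illustration: this expands the adxintrin.h protocol

     unsigned int sum;
     unsigned char c_out = _addcarryx_u32 (c_in, a, b, &sum);

   the add of -1 above reloads CF from c_in (CF is set iff c_in != 0), the
   ADCX (or plain ADC) computes a + b + CF, and the trailing LTU of the
   flags register materializes the carry-out as the return value, so calls
   can be chained for multi-word addition.  */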
    case IX86_BUILTIN_GATHERSIV2DF:
      icode = CODE_FOR_avx2_gathersiv2df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4DF:
      icode = CODE_FOR_avx2_gathersiv4df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV2DF:
      icode = CODE_FOR_avx2_gatherdiv2df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4DF:
      icode = CODE_FOR_avx2_gatherdiv4df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4SF:
      icode = CODE_FOR_avx2_gathersiv4sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV8SF:
      icode = CODE_FOR_avx2_gathersiv8sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4SF:
      icode = CODE_FOR_avx2_gatherdiv4sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV8SF:
      icode = CODE_FOR_avx2_gatherdiv8sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV2DI:
      icode = CODE_FOR_avx2_gathersiv2di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4DI:
      icode = CODE_FOR_avx2_gathersiv4di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV2DI:
      icode = CODE_FOR_avx2_gatherdiv2di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4DI:
      icode = CODE_FOR_avx2_gatherdiv4di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV4SI:
      icode = CODE_FOR_avx2_gathersiv4si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERSIV8SI:
      icode = CODE_FOR_avx2_gathersiv8si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV4SI:
      icode = CODE_FOR_avx2_gatherdiv4si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERDIV8SI:
      icode = CODE_FOR_avx2_gatherdiv8si;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTSIV4DF:
      icode = CODE_FOR_avx2_gathersiv4df;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTDIV8SF:
      icode = CODE_FOR_avx2_gatherdiv8sf;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTSIV4DI:
      icode = CODE_FOR_avx2_gathersiv4di;
      goto gather_gen;
    case IX86_BUILTIN_GATHERALTDIV8SI:
      icode = CODE_FOR_avx2_gatherdiv8si;
      goto gather_gen;
    gather_gen:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      arg3 = CALL_EXPR_ARG (exp, 3);
      arg4 = CALL_EXPR_ARG (exp, 4);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      op3 = expand_normal (arg3);
      op4 = expand_normal (arg4);
      /* Note the arg order is different from the operand order.  */
      mode0 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[3].mode;
      mode3 = insn_data[icode].operand[4].mode;
      mode4 = insn_data[icode].operand[5].mode;

      if (target == NULL_RTX
          || GET_MODE (target) != insn_data[icode].operand[0].mode)
        subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
      else
        subtarget = target;

      if (fcode == IX86_BUILTIN_GATHERALTSIV4DF
          || fcode == IX86_BUILTIN_GATHERALTSIV4DI)
        {
          rtx half = gen_reg_rtx (V4SImode);
          if (!nonimmediate_operand (op2, V8SImode))
            op2 = copy_to_mode_reg (V8SImode, op2);
          emit_insn (gen_vec_extract_lo_v8si (half, op2));
          op2 = half;
        }
      else if (fcode == IX86_BUILTIN_GATHERALTDIV8SF
               || fcode == IX86_BUILTIN_GATHERALTDIV8SI)
        {
          rtx (*gen) (rtx, rtx);
          rtx half = gen_reg_rtx (mode0);
          if (mode0 == V4SFmode)
            gen = gen_vec_extract_lo_v8sf;
          else
            gen = gen_vec_extract_lo_v8si;
          if (!nonimmediate_operand (op0, GET_MODE (op0)))
            op0 = copy_to_mode_reg (GET_MODE (op0), op0);
          emit_insn (gen (half, op0));
          op0 = half;
          if (!nonimmediate_operand (op3, GET_MODE (op3)))
            op3 = copy_to_mode_reg (GET_MODE (op3), op3);
          emit_insn (gen (half, op3));
          op3 = half;
        }

      /* Force memory operand only with base register here.  But we
         don't want to do it on memory operand for other builtin
         functions.  */
      if (GET_MODE (op1) != Pmode)
        op1 = convert_to_mode (Pmode, op1, 1);
      op1 = force_reg (Pmode, op1);

      if (!insn_data[icode].operand[1].predicate (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
      if (!insn_data[icode].operand[2].predicate (op1, Pmode))
        op1 = copy_to_mode_reg (Pmode, op1);
      if (!insn_data[icode].operand[3].predicate (op2, mode2))
        op2 = copy_to_mode_reg (mode2, op2);
      if (!insn_data[icode].operand[4].predicate (op3, mode3))
        op3 = copy_to_mode_reg (mode3, op3);
      if (!insn_data[icode].operand[5].predicate (op4, mode4))
        {
          error ("last argument must be scale 1, 2, 4, 8");
          return const0_rtx;
        }

      /* Optimize.  If mask is known to have all high bits set,
         replace op0 with pc_rtx to signal that the instruction
         overwrites the whole destination and doesn't use its
         previous contents.  */
      if (optimize)
        {
          if (TREE_CODE (arg3) == VECTOR_CST)
            {
              unsigned int negative = 0;
              for (i = 0; i < VECTOR_CST_NELTS (arg3); ++i)
                {
                  tree cst = VECTOR_CST_ELT (arg3, i);
                  if (TREE_CODE (cst) == INTEGER_CST
                      && tree_int_cst_sign_bit (cst))
                    negative++;
                  else if (TREE_CODE (cst) == REAL_CST
                           && REAL_VALUE_NEGATIVE (TREE_REAL_CST (cst)))
                    negative++;
                }
              if (negative == TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg3)))
                op0 = pc_rtx;
            }
          else if (TREE_CODE (arg3) == SSA_NAME)
            {
              /* Recognize also when mask is like:
                 __v2df src = _mm_setzero_pd ();
                 __v2df mask = _mm_cmpeq_pd (src, src);
                 or
                 __v8sf src = _mm256_setzero_ps ();
                 __v8sf mask = _mm256_cmp_ps (src, src, _CMP_EQ_OQ);
                 as that is a cheaper way to load all ones into
                 a register than having to load a constant from
                 memory.  */
              gimple def_stmt = SSA_NAME_DEF_STMT (arg3);
              if (is_gimple_call (def_stmt))
                {
                  tree fndecl = gimple_call_fndecl (def_stmt);
                  if (fndecl
                      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
                    switch ((unsigned int) DECL_FUNCTION_CODE (fndecl))
                      {
                      case IX86_BUILTIN_CMPPD:
                      case IX86_BUILTIN_CMPPS:
                      case IX86_BUILTIN_CMPPD256:
                      case IX86_BUILTIN_CMPPS256:
                        if (!integer_zerop (gimple_call_arg (def_stmt, 2)))
                          break;
                        /* FALLTHRU */
                      case IX86_BUILTIN_CMPEQPD:
                      case IX86_BUILTIN_CMPEQPS:
                        if (initializer_zerop (gimple_call_arg (def_stmt, 0))
                            && initializer_zerop (gimple_call_arg (def_stmt,
                                                                   1)))
                          op0 = pc_rtx;
                        break;
                      default:
                        break;
                      }
                }
            }
        }

      pat = GEN_FCN (icode) (subtarget, op0, op1, op2, op3, op4);
      if (!pat)
        return const0_rtx;
      emit_insn (pat);

      if (fcode == IX86_BUILTIN_GATHERDIV8SF
          || fcode == IX86_BUILTIN_GATHERDIV8SI)
        {
          enum machine_mode tmode = GET_MODE (subtarget) == V8SFmode
                                    ? V4SFmode : V4SImode;
          if (target == NULL_RTX)
            target = gen_reg_rtx (tmode);
          if (tmode == V4SFmode)
            emit_insn (gen_vec_extract_lo_v8sf (target, subtarget));
          else
            emit_insn (gen_vec_extract_lo_v8si (target, subtarget));
        }
      else
        target = subtarget;

      return target;
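/* For illustration: an AVX2 intrinsic such as

     __m256d r = _mm256_i32gather_pd (base, vindex, 8);

   reaches gather_gen with a V4SI index and V4DF destination; the unmasked
   intrinsic forms pass an all-ones mask built from a self-comparison,
   which the optimization above replaces with pc_rtx.  */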
    case IX86_BUILTIN_XABORT:
      icode = CODE_FOR_xabort;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;
      if (!insn_data[icode].operand[0].predicate (op0, mode0))
        {
          error ("the xabort's argument must be an 8-bit immediate");
          return const0_rtx;
        }
      emit_insn (gen_xabort (op0));
      return 0;
    default:
      break;
    }

  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_special_args_builtin (d, exp, target);

  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    if (d->code == fcode)
      switch (fcode)
        {
        case IX86_BUILTIN_FABSQ:
        case IX86_BUILTIN_COPYSIGNQ:
          if (!TARGET_SSE)
            /* Emit a normal call if SSE isn't available.  */
            return expand_call (exp, target, ignore);
        default:
          return ix86_expand_args_builtin (d, exp, target);
        }

  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_comi (d, exp, target);

  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_pcmpestr (d, exp, target);

  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_pcmpistr (d, exp, target);

  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    if (d->code == fcode)
      return ix86_expand_multi_arg_builtin (d->icode, exp, target,
                                            (enum ix86_builtin_func_type)
                                            d->flag, d->comparison);

  gcc_unreachable ();
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
ix86_builtin_vectorized_function (tree fndecl, tree type_out,
                                  tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;
  enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
    case BUILT_IN_SQRT:
      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_SQRTPD];
          else if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_SQRTPD256];
        }
      break;

    case BUILT_IN_SQRTF:
      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
        }
      break;

    case BUILT_IN_IFLOOR:
    case BUILT_IN_LFLOOR:
    case BUILT_IN_LLFLOOR:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SImode && in_mode == DFmode)
        {
          if (out_n == 4 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX];
          else if (out_n == 8 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_FLOORPD_VEC_PACK_SFIX256];
        }
      break;

    case BUILT_IN_IFLOORF:
    case BUILT_IN_LFLOORF:
    case BUILT_IN_LLFLOORF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SImode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_FLOORPS_SFIX];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_FLOORPS_SFIX256];
        }
      break;

    case BUILT_IN_ICEIL:
    case BUILT_IN_LCEIL:
    case BUILT_IN_LLCEIL:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SImode && in_mode == DFmode)
        {
          if (out_n == 4 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_CEILPD_VEC_PACK_SFIX];
          else if (out_n == 8 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_CEILPD_VEC_PACK_SFIX256];
        }
      break;

    case BUILT_IN_ICEILF:
    case BUILT_IN_LCEILF:
    case BUILT_IN_LLCEILF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SImode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_CEILPS_SFIX];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_CEILPS_SFIX256];
        }
      break;

    case BUILT_IN_IRINT:
    case BUILT_IN_LRINT:
    case BUILT_IN_LLRINT:
      if (out_mode == SImode && in_mode == DFmode)
        {
          if (out_n == 4 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
          else if (out_n == 8 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX256];
        }
      break;

    case BUILT_IN_IRINTF:
    case BUILT_IN_LRINTF:
    case BUILT_IN_LLRINTF:
      if (out_mode == SImode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
        }
      break;

    case BUILT_IN_IROUND:
    case BUILT_IN_LROUND:
    case BUILT_IN_LLROUND:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SImode && in_mode == DFmode)
        {
          if (out_n == 4 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX];
          else if (out_n == 8 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ_VEC_PACK_SFIX256];
        }
      break;

    case BUILT_IN_IROUNDF:
    case BUILT_IN_LROUNDF:
    case BUILT_IN_LLROUNDF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SImode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ_SFIX];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ_SFIX256];
        }
      break;

    case BUILT_IN_COPYSIGN:
      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
          else if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
        }
      break;

    case BUILT_IN_COPYSIGNF:
      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
        }
      break;

    case BUILT_IN_FLOOR:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_FLOORPD];
          else if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_FLOORPD256];
        }
      break;

    case BUILT_IN_FLOORF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_FLOORPS];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_FLOORPS256];
        }
      break;

    case BUILT_IN_CEIL:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_CEILPD];
          else if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_CEILPD256];
        }
      break;

    case BUILT_IN_CEILF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_CEILPS];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_CEILPS256];
        }
      break;

    case BUILT_IN_TRUNC:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_TRUNCPD];
          else if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
        }
      break;

    case BUILT_IN_TRUNCF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_TRUNCPS];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
        }
      break;

    case BUILT_IN_RINT:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_RINTPD];
          else if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_RINTPD256];
        }
      break;

    case BUILT_IN_RINTF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_RINTPS];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_RINTPS256];
        }
      break;

    case BUILT_IN_ROUND:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ];
          else if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ256];
        }
      break;

    case BUILT_IN_ROUNDF:
      /* The round insn does not trap on denormals.  */
      if (flag_trapping_math || !TARGET_ROUND)
        break;

      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ];
          else if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ256];
        }
      break;

    case BUILT_IN_FMA:
      if (out_mode == DFmode && in_mode == DFmode)
        {
          if (out_n == 2 && in_n == 2)
            return ix86_builtins[IX86_BUILTIN_VFMADDPD];
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
        }
      break;

    case BUILT_IN_FMAF:
      if (out_mode == SFmode && in_mode == SFmode)
        {
          if (out_n == 4 && in_n == 4)
            return ix86_builtins[IX86_BUILTIN_VFMADDPS];
          if (out_n == 8 && in_n == 8)
            return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
        }
      break;

    default:
      break;
    }

  /* Dispatch to a handler for a vectorization library.  */
  if (ix86_veclib_handler)
    return ix86_veclib_handler ((enum built_in_function) fn, type_out,
                                type_in);

  return NULL_TREE;
}
/* Handler for an SVML-style interface to
   a library with vectorized intrinsics.  */

static tree
ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20];
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* The SVML is suitable for unsafe math only.  */
  if (!flag_unsafe_math_optimizations)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
    case BUILT_IN_LOG10:
    case BUILT_IN_POW:
    case BUILT_IN_TANH:
    case BUILT_IN_TAN:
    case BUILT_IN_ATAN:
    case BUILT_IN_ATAN2:
    case BUILT_IN_ATANH:
    case BUILT_IN_CBRT:
    case BUILT_IN_SINH:
    case BUILT_IN_SIN:
    case BUILT_IN_ASINH:
    case BUILT_IN_ASIN:
    case BUILT_IN_COSH:
    case BUILT_IN_COS:
    case BUILT_IN_ACOSH:
    case BUILT_IN_ACOS:
      if (el_mode != DFmode || n != 2)
        return NULL_TREE;
      break;

    case BUILT_IN_EXPF:
    case BUILT_IN_LOGF:
    case BUILT_IN_LOG10F:
    case BUILT_IN_POWF:
    case BUILT_IN_TANHF:
    case BUILT_IN_TANF:
    case BUILT_IN_ATANF:
    case BUILT_IN_ATAN2F:
    case BUILT_IN_ATANHF:
    case BUILT_IN_CBRTF:
    case BUILT_IN_SINHF:
    case BUILT_IN_SINF:
    case BUILT_IN_ASINHF:
    case BUILT_IN_ASINF:
    case BUILT_IN_COSHF:
    case BUILT_IN_COSF:
    case BUILT_IN_ACOSHF:
    case BUILT_IN_ACOSF:
      if (el_mode != SFmode || n != 4)
        return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  bname = IDENTIFIER_POINTER (DECL_NAME (builtin_decl_implicit (fn)));

  if (fn == BUILT_IN_LOGF)
    strcpy (name, "vmlsLn4");
  else if (fn == BUILT_IN_LOG)
    strcpy (name, "vmldLn2");
  else if (n == 4)
    {
      sprintf (name, "vmls%s", bname+10);
      name[strlen (name)-1] = '4';
    }
  else
    sprintf (name, "vmld%s2", bname+10);

  /* Convert to uppercase.  */
  name[4] &= ~0x20;

  arity = 0;
  for (args = DECL_ARGUMENTS (builtin_decl_implicit (fn));
       args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
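/* For illustration of the name mangling above: BUILT_IN_SINF has bname
   "__builtin_sinf", so bname+10 is "sinf"; for n == 4 this yields
   "vmlssinf", the trailing character is replaced to give "vmlssin4", and
   uppercasing name[4] produces the SVML entry point "vmlsSin4".  */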
/* Handler for an ACML-style interface to
   a library with vectorized intrinsics.  */

static tree
ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20] = "__vr.._";
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* The ACML is 64bits only and suitable for unsafe math only as
     it does not correctly support parts of IEEE with the required
     precision such as denormals.  */
  if (!TARGET_64BIT
      || !flag_unsafe_math_optimizations)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_SIN:
    case BUILT_IN_COS:
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
    case BUILT_IN_LOG2:
    case BUILT_IN_LOG10:
      name[4] = 'd';
      name[5] = '2';
      if (el_mode != DFmode
          || n != 2)
        return NULL_TREE;
      break;

    case BUILT_IN_SINF:
    case BUILT_IN_COSF:
    case BUILT_IN_EXPF:
    case BUILT_IN_POWF:
    case BUILT_IN_LOGF:
    case BUILT_IN_LOG2F:
    case BUILT_IN_LOG10F:
      name[4] = 's';
      name[5] = '4';
      if (el_mode != SFmode
          || n != 4)
        return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  bname = IDENTIFIER_POINTER (DECL_NAME (builtin_decl_implicit (fn)));
  sprintf (name + 7, "%s", bname+10);

  arity = 0;
  for (args = DECL_ARGUMENTS (builtin_decl_implicit (fn));
       args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
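/* For illustration of the ACML mangling: for BUILT_IN_SIN the template
   "__vr.._" becomes "__vrd2_" (double, two lanes), and appending bname+10
   ("sin") produces the library entry point "__vrd2_sin".  */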
/* Returns a decl of a function that implements gather load with
   memory type MEM_VECTYPE and index type INDEX_VECTYPE and SCALE.
   Return NULL_TREE if it is not available.  */

static tree
ix86_vectorize_builtin_gather (const_tree mem_vectype,
                               const_tree index_type, int scale)
{
  bool si;
  enum ix86_builtins code;

  if (!TARGET_AVX2)
    return NULL_TREE;

  if ((TREE_CODE (index_type) != INTEGER_TYPE
       && !POINTER_TYPE_P (index_type))
      || (TYPE_MODE (index_type) != SImode
          && TYPE_MODE (index_type) != DImode))
    return NULL_TREE;

  if (TYPE_PRECISION (index_type) > POINTER_SIZE)
    return NULL_TREE;

  /* v*gather* insn sign extends index to pointer mode.  */
  if (TYPE_PRECISION (index_type) < POINTER_SIZE
      && TYPE_UNSIGNED (index_type))
    return NULL_TREE;

  if (scale <= 0
      || scale > 8
      || (scale & (scale - 1)) != 0)
    return NULL_TREE;

  si = TYPE_MODE (index_type) == SImode;
  switch (TYPE_MODE (mem_vectype))
    {
    case V2DFmode:
      code = si ? IX86_BUILTIN_GATHERSIV2DF : IX86_BUILTIN_GATHERDIV2DF;
      break;
    case V4DFmode:
      code = si ? IX86_BUILTIN_GATHERALTSIV4DF : IX86_BUILTIN_GATHERDIV4DF;
      break;
    case V2DImode:
      code = si ? IX86_BUILTIN_GATHERSIV2DI : IX86_BUILTIN_GATHERDIV2DI;
      break;
    case V4DImode:
      code = si ? IX86_BUILTIN_GATHERALTSIV4DI : IX86_BUILTIN_GATHERDIV4DI;
      break;
    case V4SFmode:
      code = si ? IX86_BUILTIN_GATHERSIV4SF : IX86_BUILTIN_GATHERDIV4SF;
      break;
    case V8SFmode:
      code = si ? IX86_BUILTIN_GATHERSIV8SF : IX86_BUILTIN_GATHERALTDIV8SF;
      break;
    case V4SImode:
      code = si ? IX86_BUILTIN_GATHERSIV4SI : IX86_BUILTIN_GATHERDIV4SI;
      break;
    case V8SImode:
      code = si ? IX86_BUILTIN_GATHERSIV8SI : IX86_BUILTIN_GATHERALTDIV8SI;
      break;
    default:
      return NULL_TREE;
    }

  return ix86_builtins[code];
}
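/* For illustration: a gather of V4DF elements with an SImode index type
   selects IX86_BUILTIN_GATHERALTSIV4DF above; the "ALT" variants let the
   expander adjust the index vector (extracting the used half) to match
   what the instruction pattern expects.  */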
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
                         bool sqrt ATTRIBUTE_UNUSED)
{
  if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
         && flag_finite_math_only && !flag_trapping_math
         && flag_unsafe_math_optimizations))
    return NULL_TREE;

  if (md_fn)
    /* Machine dependent builtins.  */
    switch (fn)
      {
        /* Vectorized version of sqrt to rsqrt conversion.  */
      case IX86_BUILTIN_SQRTPS_NR:
        return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];

      case IX86_BUILTIN_SQRTPS_NR256:
        return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];

      default:
        return NULL_TREE;
      }
  else
    /* Normal builtins.  */
    switch (fn)
      {
        /* Sqrt to rsqrt conversion.  */
      case BUILT_IN_SQRTF:
        return ix86_builtins[IX86_BUILTIN_RSQRTF];

      default:
        return NULL_TREE;
      }
}
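/* For illustration: under -ffast-math (which implies the conditions tested
   above), a division by sqrtf can be rewritten to use the reciprocal
   square root; the _NR names denote the variants that follow RSQRTPS with
   a Newton-Raphson refinement step to recover accuracy.  */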
/* Helper for avx_vpermilps256_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and the imm8+1 for a match.  */

int
avx_vpermilp_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode);
  unsigned mask = 0;
  unsigned char ipar[8] = {};  /* Silence -Wuninitialized warning.  */

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;

  /* Validate that all of the elements are constants, and not totally
     out of range.  Copy the data into an integral array to make the
     subsequent checks easier.  */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
        return 0;
      ei = INTVAL (er);
      if (ei >= nelt)
        return 0;
      ipar[i] = ei;
    }

  switch (mode)
    {
    case V4DFmode:
      /* In the 256-bit DFmode case, we can only move elements within
         a 128-bit lane.  */
      for (i = 0; i < 2; ++i)
        {
          if (ipar[i] >= 2)
            return 0;
          mask |= ipar[i] << i;
        }
      for (i = 2; i < 4; ++i)
        {
          if (ipar[i] < 2)
            return 0;
          mask |= (ipar[i] - 2) << i;
        }
      break;

    case V8SFmode:
      /* In the 256-bit SFmode case, we have full freedom of movement
         within the low 128-bit lane, but the high 128-bit lane must
         mirror the exact same pattern.  */
      for (i = 0; i < 4; ++i)
        if (ipar[i] + 4 != ipar[i + 4])
          return 0;
      nelt = 4;
      /* FALLTHRU */

    case V2DFmode:
    case V4SFmode:
      /* In the 128-bit case, we've full freedom in the placement of
         the elements from the source operand.  */
      for (i = 0; i < nelt; ++i)
        mask |= ipar[i] << (i * (nelt / 2));
      break;

    default:
      gcc_unreachable ();
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
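/* Worked example: for V4SFmode a PARALLEL of (1 0 3 2) packs two bits per
   element, giving mask 0xB1 -- the vpermilps immediate that swaps adjacent
   elements -- so the function returns 0xB1 + 1.  */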
/* Helper for avx_vperm2f128_v4df_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and the imm8+1 for a match.  */

int
avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
  unsigned mask = 0;
  unsigned char ipar[8] = {};  /* Silence -Wuninitialized warning.  */

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;

  /* Validate that all of the elements are constants, and not totally
     out of range.  Copy the data into an integral array to make the
     subsequent checks easier.  */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
        return 0;
      ei = INTVAL (er);
      if (ei >= 2 * nelt)
        return 0;
      ipar[i] = ei;
    }

  /* Validate that the halves of the permute are halves.  */
  for (i = 0; i < nelt2 - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;
  for (i = nelt2; i < nelt - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;

  /* Reconstruct the mask.  */
  for (i = 0; i < 2; ++i)
    {
      unsigned e = ipar[i * nelt2];
      if (e % nelt2)
        return 0;
      e /= nelt2;
      mask |= e << (i * 4);
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
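/* Worked example: for V4DFmode a PARALLEL of (2 3 4 5) has halves {2,3}
   and {4,5}; the half starts scale to e = 1 and e = 2, so mask = 0x21 and
   0x22 is returned, corresponding to the vperm2f128 immediate 0x21 (low
   half from the high lane of operand 1, high half from the low lane of
   operand 2).  */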
/* Store OPERAND to the memory after reload is completed.  This means
   that we can't easily use assign_stack_local.  */
rtx
ix86_force_to_memory (enum machine_mode mode, rtx operand)
{
  rtx result;

  gcc_assert (reload_completed);
  if (ix86_using_red_zone ())
    {
      result = gen_rtx_MEM (mode,
                            gen_rtx_PLUS (Pmode,
                                          stack_pointer_rtx,
                                          GEN_INT (-RED_ZONE_SIZE)));
      emit_move_insn (result, operand);
    }
  else if (TARGET_64BIT)
    {
      switch (mode)
        {
        case HImode:
        case SImode:
          operand = gen_lowpart (DImode, operand);
          /* FALLTHRU */
        case DImode:
          emit_insn (
                      gen_rtx_SET (VOIDmode,
                                   gen_rtx_MEM (DImode,
                                                gen_rtx_PRE_DEC (DImode,
                                                        stack_pointer_rtx)),
                                   operand));
          break;
        default:
          gcc_unreachable ();
        }
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  else
    {
      switch (mode)
        {
        case DImode:
          {
            rtx operands[2];
            split_double_mode (mode, &operand, 1, operands, operands + 1);
            emit_insn (
                        gen_rtx_SET (VOIDmode,
                                     gen_rtx_MEM (SImode,
                                                  gen_rtx_PRE_DEC (Pmode,
                                                        stack_pointer_rtx)),
                                     operands[1]));
            emit_insn (
                        gen_rtx_SET (VOIDmode,
                                     gen_rtx_MEM (SImode,
                                                  gen_rtx_PRE_DEC (Pmode,
                                                        stack_pointer_rtx)),
                                     operands[0]));
          }
          break;
        case HImode:
          /* Store HImodes as SImodes.  */
          operand = gen_lowpart (SImode, operand);
          /* FALLTHRU */
        case SImode:
          emit_insn (
                      gen_rtx_SET (VOIDmode,
                                   gen_rtx_MEM (GET_MODE (operand),
                                                gen_rtx_PRE_DEC (SImode,
                                                        stack_pointer_rtx)),
                                   operand));
          break;
        default:
          gcc_unreachable ();
        }
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  return result;
}
/* Free operand from the memory.  */
void
ix86_free_from_memory (enum machine_mode mode)
{
  if (!ix86_using_red_zone ())
    {
      int size;

      if (mode == DImode || TARGET_64BIT)
        size = 8;
      else
        size = 4;
      /* Use LEA to deallocate stack space.  In peephole2 it will be converted
         to pop or add instruction if registers are available.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                            GEN_INT (size))));
    }
}
/* Return a register priority for hard reg REGNO.  */
static int
ix86_register_priority (int hard_regno)
{
  /* ebp and r13 as the base always wants a displacement, r12 as the
     base always wants an index.  So discourage their usage in an
     address.  */
  if (hard_regno == R12_REG || hard_regno == R13_REG)
    return 0;
  if (hard_regno == BP_REG)
    return 1;
  /* New x86-64 int registers result in bigger code size.  Discourage
     them.  */
  if (FIRST_REX_INT_REG <= hard_regno && hard_regno <= LAST_REX_INT_REG)
    return 2;
  /* New x86-64 SSE registers result in bigger code size.  Discourage
     them.  */
  if (FIRST_REX_SSE_REG <= hard_regno && hard_regno <= LAST_REX_SSE_REG)
    return 2;
  /* Usage of AX register results in smaller code.  Prefer it.  */
  if (hard_regno == 0)
    return 4;
  return 3;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.

   Put float CONST_DOUBLE in the constant pool instead of fp regs.
   QImode must go into class Q_REGS.
   Narrow ALL_REGS to GENERAL_REGS.  This supports allowing movsf and
   movdf to do mem-to-mem moves through integer regs.  */

static reg_class_t
ix86_preferred_reload_class (rtx x, reg_class_t regclass)
{
  enum machine_mode mode = GET_MODE (x);

  /* We're only allowed to return a subclass of CLASS.  Many of the
     following checks fail for NO_REGS, so eliminate that early.  */
  if (regclass == NO_REGS)
    return NO_REGS;

  /* All classes can load zeros.  */
  if (x == CONST0_RTX (mode))
    return regclass;

  /* Force constants into memory if we are loading a (nonzero) constant into
     an MMX or SSE register.  This is because there are no MMX/SSE instructions
     to load from a constant.  */
  if (CONSTANT_P (x)
      && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
    return NO_REGS;

  /* Prefer SSE regs only, if we can use them for math.  */
  if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
    return SSE_CLASS_P (regclass) ? regclass : NO_REGS;

  /* Floating-point constants need more complex checks.  */
  if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
    {
      /* General regs can load everything.  */
      if (reg_class_subset_p (regclass, GENERAL_REGS))
        return regclass;

      /* Floats can load 0 and 1 plus some others.  Note that we eliminated
         zero above.  We only want to wind up preferring 80387 registers if
         we plan on doing computation with them.  */
      if (TARGET_80387
          && standard_80387_constant_p (x) > 0)
        {
          /* Limit class to non-sse.  */
          if (regclass == FLOAT_SSE_REGS)
            return FLOAT_REGS;
          if (regclass == FP_TOP_SSE_REGS)
            return FP_TOP_REG;
          if (regclass == FP_SECOND_SSE_REGS)
            return FP_SECOND_REG;
          if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
            return regclass;
        }

      return NO_REGS;
    }

  /* Generally when we see PLUS here, it's the function invariant
     (plus soft-fp const_int).  Which can only be computed into general
     regs.  */
  if (GET_CODE (x) == PLUS)
    return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;

  /* QImode constants are easy to load, but non-constant QImode data
     must go into Q_REGS.  */
  if (GET_MODE (x) == QImode && !CONSTANT_P (x))
    {
      if (reg_class_subset_p (regclass, Q_REGS))
        return regclass;
      if (reg_class_subset_p (Q_REGS, regclass))
        return Q_REGS;
      return NO_REGS;
    }

  return regclass;
}
/* Discourage putting floating-point values in SSE registers unless
   SSE math is being used, and likewise for the 387 registers.  */
static reg_class_t
ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
{
  enum machine_mode mode = GET_MODE (x);

  /* Restrict the output reload class to the register bank that we are doing
     math on.  If we would like not to return a subset of CLASS, reject this
     alternative: if reload cannot do this, it will still use its choice.  */
  if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
    return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;

  if (X87_FLOAT_MODE_P (mode))
    {
      if (regclass == FP_TOP_SSE_REGS)
        return FP_TOP_REG;
      else if (regclass == FP_SECOND_SSE_REGS)
        return FP_SECOND_REG;
      else
        return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
    }

  return regclass;
}
static reg_class_t
ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
                       enum machine_mode mode, secondary_reload_info *sri)
{
  /* Double-word spills from general registers to non-offsettable memory
     references (zero-extended addresses) require special handling.  */
  if (TARGET_64BIT
      && MEM_P (x)
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && rclass == GENERAL_REGS
      && !offsettable_memref_p (x))
    {
      sri->icode = (in_p
                    ? CODE_FOR_reload_noff_load
                    : CODE_FOR_reload_noff_store);
      /* Add the cost of moving address to a temporary.  */
      sri->extra_cost = 1;

      return NO_REGS;
    }

  /* QImode spills from non-QI registers require
     intermediate register on 32bit targets.  */
  if (!TARGET_64BIT
      && !in_p && mode == QImode
      && (rclass == GENERAL_REGS
          || rclass == LEGACY_REGS
          || rclass == NON_Q_REGS
          || rclass == SIREG
          || rclass == DIREG
          || rclass == INDEX_REGS))
    {
      int regno;

      if (REG_P (x))
        regno = REGNO (x);
      else
        regno = -1;

      if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
        regno = true_regnum (x);

      /* Return Q_REGS if the operand is in memory.  */
      if (regno == -1)
        return Q_REGS;
    }

  /* This condition handles corner case where an expression involving
     pointers gets vectorized.  We're trying to use the address of a
     stack slot as a vector initializer.

     (set (reg:V2DI 74 [ vect_cst_.2 ])
          (vec_duplicate:V2DI (reg/f:DI 20 frame)))

     Eventually frame gets turned into sp+offset like this:

     (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
          (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
                                       (const_int 392 [0x188]))))

     That later gets turned into:

     (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
          (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
            (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))

     We'll have the following reload recorded:

       Reload 0: reload_in (DI) =
           (plus:DI (reg/f:DI 7 sp)
            (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
       reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
       SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
       reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
       reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
       reload_reg_rtx: (reg:V2DI 22 xmm1)

     Which isn't going to work since SSE instructions can't handle scalar
     additions.  Returning GENERAL_REGS forces the addition into integer
     register and reload can handle subsequent reloads without problems.  */

  if (in_p && GET_CODE (x) == PLUS
      && SSE_CLASS_P (rclass)
      && SCALAR_INT_MODE_P (mode))
    return GENERAL_REGS;

  return NO_REGS;
}
/* Implement TARGET_CLASS_LIKELY_SPILLED_P.  */

static bool
ix86_class_likely_spilled_p (reg_class_t rclass)
{
  switch (rclass)
    {
    case AREG:
    case DREG:
    case CREG:
    case BREG:
    case AD_REGS:
    case CLOBBERED_REGS:
    case Q_REGS:
    case SIREG:
    case DIREG:
    case SSE_FIRST_REG:
    case FP_TOP_REG:
    case FP_SECOND_REG:
      return true;

    default:
      break;
    }

  return false;
}
/* If we are copying between general and FP registers, we need a memory
   location. The same is true for SSE and MMX registers.

   To optimize register_move_cost performance, allow inline variant.

   The macro can't work reliably when one of the CLASSES is class containing
   registers from multiple units (SSE, MMX, integer).  We avoid this by never
   combining those units in single alternative in the machine description.
   Ensure that this constraint holds to avoid unexpected surprises.

   When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
   enforce these sanity checks.  */

static inline bool
inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
                                enum machine_mode mode, int strict)
{
  if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
      || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
      || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
      || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
      || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
      || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
    {
      gcc_assert (!strict || lra_in_progress);
      return true;
    }

  if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
    return true;

  /* ??? This is a lie.  We do have moves between mmx/general, and for
     mmx/sse2.  But by saying we need secondary memory we discourage the
     register allocator from using the mmx registers unless needed.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
    return true;

  if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
    {
      /* SSE1 doesn't have any direct moves from other classes.  */
      if (!TARGET_SSE2)
        return true;

      /* If the target says that inter-unit moves are more expensive
         than moving through memory, then don't generate them.  */
      if (!TARGET_INTER_UNIT_MOVES)
        return true;

      /* Between SSE and general, we have moves no larger than word size.  */
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
        return true;
    }

  return false;
}

bool
ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
                              enum machine_mode mode, int strict)
{
  return inline_secondary_memory_needed (class1, class2, mode, strict);
}
/* Implement the TARGET_CLASS_MAX_NREGS hook.

   On the 80386, this is the size of MODE in words,
   except in the FP regs, where a single reg is always enough.  */

static unsigned char
ix86_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
{
  if (MAYBE_INTEGER_CLASS_P (rclass))
    {
      if (mode == XFmode)
        return (TARGET_64BIT ? 2 : 3);
      else if (mode == XCmode)
        return (TARGET_64BIT ? 4 : 6);
      else
        return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
    }
  else
    {
      if (COMPLEX_MODE_P (mode))
        return 2;
      else
        return 1;
    }
}
/* Return true if the registers in CLASS cannot represent the change from
   modes FROM to TO.  */

bool
ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                               enum reg_class regclass)
{
  if (from == to)
    return false;

  /* x87 registers can't do subreg at all, as all values are reformatted
     to extended precision.  */
  if (MAYBE_FLOAT_CLASS_P (regclass))
    return true;

  if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
    {
      /* Vector registers do not support QI or HImode loads.  If we don't
         disallow a change to these modes, reload will assume it's ok to
         drop the subreg from (subreg:SI (reg:HI 100) 0).  This affects
         the vec_dupv4hi pattern.  */
      if (GET_MODE_SIZE (from) < 4)
        return true;

      /* Vector registers do not support subreg with nonzero offsets, which
         are otherwise valid for integer registers.  Since we can't see
         whether we have a nonzero offset from here, prohibit all
         nonparadoxical subregs changing size.  */
      if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
        return true;
    }

  return false;
}
/* Return the cost of moving data of mode M between a
   register and memory.  A value of 2 is the default; this cost is
   relative to those in `REGISTER_MOVE_COST'.

   This function is used extensively by register_move_cost that is used to
   build tables at startup.  Make it inline in this case.
   When IN is 2, return maximum of in and out move cost.

   If moving between registers and memory is more expensive than
   between two registers, you should define this macro to express the
   relative cost.

   Model also increased moving costs of QImode registers in non
   Q_REGS classes.  */

static inline int
inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
                         int in)
{
  int cost;

  if (FLOAT_CLASS_P (regclass))
    {
      int index;
      switch (mode)
        {
        case SFmode:
          index = 0;
          break;
        case DFmode:
          index = 1;
          break;
        case XFmode:
          index = 2;
          break;
        default:
          return 100;
        }
      if (in == 2)
        return MAX (ix86_cost->fp_load[index], ix86_cost->fp_store[index]);
      return in ? ix86_cost->fp_load[index] : ix86_cost->fp_store[index];
    }
  if (SSE_CLASS_P (regclass))
    {
      int index;
      switch (GET_MODE_SIZE (mode))
        {
        case 4:
          index = 0;
          break;
        case 8:
          index = 1;
          break;
        case 16:
          index = 2;
          break;
        default:
          return 100;
        }
      if (in == 2)
        return MAX (ix86_cost->sse_load[index], ix86_cost->sse_store[index]);
      return in ? ix86_cost->sse_load[index] : ix86_cost->sse_store[index];
    }
  if (MMX_CLASS_P (regclass))
    {
      int index;
      switch (GET_MODE_SIZE (mode))
        {
        case 4:
          index = 0;
          break;
        case 8:
          index = 1;
          break;
        default:
          return 100;
        }
      if (in == 2)
        return MAX (ix86_cost->mmx_load[index], ix86_cost->mmx_store[index]);
      return in ? ix86_cost->mmx_load[index] : ix86_cost->mmx_store[index];
    }
  switch (GET_MODE_SIZE (mode))
    {
    case 1:
      if (Q_CLASS_P (regclass) || TARGET_64BIT)
        {
          if (!in)
            return ix86_cost->int_store[0];
          if (TARGET_PARTIAL_REG_DEPENDENCY
              && optimize_function_for_speed_p (cfun))
            cost = ix86_cost->movzbl_load;
          else
            cost = ix86_cost->int_load[0];
          if (in == 2)
            return MAX (cost, ix86_cost->int_store[0]);
          return cost;
        }
      else
        {
          if (in == 2)
            return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
          if (in)
            return ix86_cost->movzbl_load;
          else
            return ix86_cost->int_store[0] + 4;
        }
      break;
    case 2:
      if (in == 2)
        return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
      return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
    default:
      /* Compute number of 32bit moves needed.  TFmode is moved as XFmode.  */
      if (mode == TFmode)
        mode = XFmode;
      if (in == 2)
        cost = MAX (ix86_cost->int_load[2], ix86_cost->int_store[2]);
      else if (in)
        cost = ix86_cost->int_load[2];
      else
        cost = ix86_cost->int_store[2];
      return (cost * (((int) GET_MODE_SIZE (mode)
                       + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
    }
}

static int
ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
                       bool in)
{
  return inline_memory_move_cost (mode, (enum reg_class) regclass,
                                  in ? 1 : 0);
}
/* Return the cost of moving data from a register in class CLASS1 to
   one in class CLASS2.

   It is not required that the cost always equal 2 when FROM is the same as TO;
   on some machines it is expensive to move between registers if they are not
   general registers.  */

static int
ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
			 reg_class_t class2_i)
{
  enum reg_class class1 = (enum reg_class) class1_i;
  enum reg_class class2 = (enum reg_class) class2_i;

  /* In case we require secondary memory, compute cost of the store followed
     by load.  In order to avoid bad register allocation choices, we need
     for this to be *at least* as high as the symmetric MEMORY_MOVE_COST.  */
  if (inline_secondary_memory_needed (class1, class2, mode, 0))
    {
      int cost = 1;

      cost += inline_memory_move_cost (mode, class1, 2);
      cost += inline_memory_move_cost (mode, class2, 2);

      /* In case of copying from general_purpose_register we may emit multiple
         stores followed by single load causing memory size mismatch stall.
         Count this as arbitrarily high cost of 20.  */
      if (targetm.class_max_nregs (class1, mode)
	  > targetm.class_max_nregs (class2, mode))
	cost += 20;

      /* In the case of FP/MMX moves, the registers actually overlap, and we
	 have to switch modes in order to treat them differently.  */
      if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
          || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
	cost += 20;

      return cost;
    }

  /* Moves between SSE/MMX and integer unit are expensive.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
      || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))

    /* ??? By keeping returned value relatively high, we limit the number
       of moves between integer and MMX/SSE registers for all targets.
       Additionally, high value prevents problem with x86_modes_tieable_p(),
       where integer modes in MMX/SSE registers are not tieable
       because of missing QImode and HImode moves to, from or between
       MMX/SSE registers.  */
    return MAX (8, ix86_cost->mmxsse_to_integer);

  if (MAYBE_FLOAT_CLASS_P (class1))
    return ix86_cost->fp_move;
  if (MAYBE_SSE_CLASS_P (class1))
    return ix86_cost->sse_move;
  if (MAYBE_MMX_CLASS_P (class1))
    return ix86_cost->mmx_move;
  return 2;
}
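/* Note the asymmetry this creates: a GENERAL_REGS to GENERAL_REGS SImode
   copy is cheap, while the same copy between GENERAL_REGS and SSE_REGS
   costs at least 8, so the allocator is strongly biased against bouncing
   values between the integer and vector units.  */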
/* Return TRUE if hard register REGNO can hold a value of machine-mode
   MODE.  */

bool
ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* Flags and only flags can only hold CCmode values.  */
  if (CC_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;
  if (GET_MODE_CLASS (mode) == MODE_CC
      || GET_MODE_CLASS (mode) == MODE_RANDOM
      || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
    return false;
  if (STACK_REGNO_P (regno))
    return VALID_FP_MODE_P (mode);
  if (SSE_REGNO_P (regno))
    {
      /* We implement the move patterns for all vector modes into and
	 out of SSE registers, even when no operation instructions
	 are available.  OImode move is available only when AVX is
	 enabled.  */
      return ((TARGET_AVX && mode == OImode)
	      || VALID_AVX256_REG_MODE (mode)
	      || VALID_SSE_REG_MODE (mode)
	      || VALID_SSE2_REG_MODE (mode)
	      || VALID_MMX_REG_MODE (mode)
	      || VALID_MMX_REG_MODE_3DNOW (mode));
    }
  if (MMX_REGNO_P (regno))
    {
      /* We implement the move patterns for 3DNOW modes even in MMX mode,
	 so if the register is available at all, then we can move data of
	 the given mode into or out of it.  */
      return (VALID_MMX_REG_MODE (mode)
	      || VALID_MMX_REG_MODE_3DNOW (mode));
    }

  if (mode == QImode)
    {
      /* Take care for QImode values - they can be in non-QI regs,
	 but then they do cause partial register stalls.  */
      if (TARGET_64BIT || QI_REGNO_P (regno))
	return true;
      if (!TARGET_PARTIAL_REG_STALL)
	return true;
      return !can_create_pseudo_p ();
    }
  /* We handle both integer and floats in the general purpose registers.  */
  else if (VALID_INT_MODE_P (mode))
    return true;
  else if (VALID_FP_MODE_P (mode))
    return true;
  else if (VALID_DFP_MODE_P (mode))
    return true;
  /* Lots of MMX code casts 8 byte vector modes to DImode.  If we then go
     on to use that value in smaller contexts, this can easily force a
     pseudo to be allocated to GENERAL_REGS.  Since this is no worse than
     supporting DImode, allow it.  */
  else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
    return true;

  return false;
}
/* A subroutine of ix86_modes_tieable_p.  Return true if MODE is a
   tieable integer mode.  */

static bool
ix86_tieable_integer_mode_p (enum machine_mode mode)
{
  switch (mode)
    {
    case HImode:
    case SImode:
      return true;

    case QImode:
      return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;

    case DImode:
      return TARGET_64BIT;

    default:
      return false;
    }
}
/* Return true if MODE1 is accessible in a register that can hold MODE2
   without copying.  That is, all register classes that can hold MODE2
   can also hold MODE1.  */

bool
ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  if (mode1 == mode2)
    return true;

  if (ix86_tieable_integer_mode_p (mode1)
      && ix86_tieable_integer_mode_p (mode2))
    return true;

  /* MODE2 being XFmode implies fp stack or general regs, which means we
     can tie any smaller floating point modes to it.  Note that we do not
     tie this with TFmode.  */
  if (mode2 == XFmode)
    return mode1 == SFmode || mode1 == DFmode;

  /* MODE2 being DFmode implies fp stack, general or sse regs, which means
     that we can tie it with SFmode.  */
  if (mode2 == DFmode)
    return mode1 == SFmode;

  /* If MODE2 is only appropriate for an SSE register, then tie with
     any other mode acceptable to SSE registers.  */
  if (GET_MODE_SIZE (mode2) == 32
      && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 32
	    && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
  if (GET_MODE_SIZE (mode2) == 16
      && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 16
	    && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));

  /* If MODE2 is appropriate for an MMX register, then tie
     with any other mode acceptable to MMX registers.  */
  if (GET_MODE_SIZE (mode2) == 8
      && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 8
	    && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));

  return false;
}
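/* For example, V4SFmode and V2DImode are both 16 bytes and both valid in
   SSE registers, so they tie with each other, while SFmode ties with
   XFmode only through the mode2 == XFmode check above; the test is
   deliberately directional.  */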
/* Return the cost of moving between two registers of mode MODE.  */

static int
ix86_set_reg_reg_cost (enum machine_mode mode)
{
  unsigned int units = UNITS_PER_WORD;

  switch (GET_MODE_CLASS (mode))
    {
    default:
      break;

    case MODE_CC:
      units = GET_MODE_SIZE (CCmode);
      break;

    case MODE_FLOAT:
      if ((TARGET_SSE && mode == TFmode)
	  || (TARGET_80387 && mode == XFmode)
	  || ((TARGET_80387 || TARGET_SSE2) && mode == DFmode)
	  || ((TARGET_80387 || TARGET_SSE) && mode == SFmode))
	units = GET_MODE_SIZE (mode);
      break;

    case MODE_COMPLEX_FLOAT:
      if ((TARGET_SSE && mode == TCmode)
	  || (TARGET_80387 && mode == XCmode)
	  || ((TARGET_80387 || TARGET_SSE2) && mode == DCmode)
	  || ((TARGET_80387 || TARGET_SSE) && mode == SCmode))
	units = GET_MODE_SIZE (mode);
      break;

    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      if ((TARGET_AVX && VALID_AVX256_REG_MODE (mode))
	  || (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
	  || (TARGET_SSE && VALID_SSE_REG_MODE (mode))
	  || (TARGET_MMX && VALID_MMX_REG_MODE (mode)))
	units = GET_MODE_SIZE (mode);
    }

  /* Return the cost of moving between two registers of mode MODE,
     assuming that the move will be in pieces of at most UNITS bytes.  */
  return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
}
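/* Example: with TARGET_SSE2, a DFmode register move can be done in one
   piece, so units == GET_MODE_SIZE (DFmode) == 8 and the result is
   COSTS_N_INSNS (1); without SSE2 and the 80387 the same move is costed
   as multiple word-sized pieces instead.  */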
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
ix86_rtx_costs (rtx x, int code_i, int outer_code_i, int opno, int *total,
		bool speed)
{
  enum rtx_code code = (enum rtx_code) code_i;
  enum rtx_code outer_code = (enum rtx_code) outer_code_i;
  enum machine_mode mode = GET_MODE (x);
  const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;

  switch (code)
    {
    case SET:
      if (register_operand (SET_DEST (x), VOIDmode)
	  && reg_or_0_operand (SET_SRC (x), VOIDmode))
	{
	  *total = ix86_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
	  return true;
	}
      return false;

    case CONST_INT:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
	*total = 3;
      else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
	*total = 2;
      else if (flag_pic && SYMBOLIC_CONST (x)
	       && (!TARGET_64BIT
		   || (GET_CODE (x) != LABEL_REF
		       && (GET_CODE (x) != SYMBOL_REF
			   || !SYMBOL_REF_LOCAL_P (x)))))
	*total = 1;
      else
	*total = 0;
      return true;

    case CONST_DOUBLE:
      if (mode == VOIDmode)
	{
	  *total = 0;
	  return true;
	}
      switch (standard_80387_constant_p (x))
	{
	case 1: /* 0.0 */
	  *total = 1;
	  return true;
	default: /* Other constants */
	  *total = 2;
	  return true;
	case 0:
	case -1:
	  break;
	}
      if (SSE_FLOAT_MODE_P (mode))
	{
    case CONST_VECTOR:
	  switch (standard_sse_constant_p (x))
	    {
	    case 0:
	      break;
	    case 1:  /* 0: xor eliminates false dependency */
	      *total = 0;
	      return true;
	    default: /* -1: cmp contains false dependency */
	      *total = 1;
	      return true;
	    }
	}
      /* Fall back to (MEM (SYMBOL_REF)), since that's where
	 it'll probably end up.  Add a penalty for size.  */
      *total = (COSTS_N_INSNS (1)
		+ (flag_pic != 0 && !TARGET_64BIT)
		+ (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
      return true;

    case ZERO_EXTEND:
      /* The zero extensions is often completely free on x86_64, so make
	 it as cheap as possible.  */
      if (TARGET_64BIT && mode == DImode
	  && GET_MODE (XEXP (x, 0)) == SImode)
	*total = 1;
      else if (TARGET_ZERO_EXTEND_WITH_AND)
	*total = cost->add;
      else
	*total = cost->movzx;
      break;

    case SIGN_EXTEND:
      *total = cost->movsx;
      break;

    case ASHIFT:
      if (SCALAR_INT_MODE_P (mode)
	  && GET_MODE_SIZE (mode) < UNITS_PER_WORD
	  && CONST_INT_P (XEXP (x, 1)))
	{
	  HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
	  if (value == 1)
	    {
	      *total = cost->add;
	      return false;
	    }
	  if ((value == 2 || value == 3)
	      && cost->lea <= cost->shift_const)
	    {
	      *total = cost->lea;
	      return false;
	    }
	}
      /* FALLTHRU */

    case ROTATE:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATERT:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  /* ??? Should be SSE vector operation cost.  */
	  /* At least for published AMD latencies, this really is the same
	     as the latency for a simple fpu operation like fabs.  */
	  /* V*QImode is emulated with 1-11 insns.  */
	  if (mode == V16QImode || mode == V32QImode)
	    {
	      int count = 11;
	      if (TARGET_XOP && mode == V16QImode)
		{
		  /* For XOP we use vpshab, which requires a broadcast of the
		     value to the variable shift insn.  For constants this
		     means a V16Q const in mem; even when we can perform the
		     shift with one insn set the cost to prefer paddb.  */
		  if (CONSTANT_P (XEXP (x, 1)))
		    {
		      *total = (cost->fabs
				+ rtx_cost (XEXP (x, 0), code, 0, speed)
				+ (speed ? 2 : COSTS_N_BYTES (16)));
		      return true;
		    }
		  count = 3;
		}
	      else if (TARGET_SSSE3)
		count = 7;
	      *total = cost->fabs * count;
	    }
	  else
	    *total = cost->fabs;
	}
      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	{
	  if (CONST_INT_P (XEXP (x, 1)))
	    {
	      if (INTVAL (XEXP (x, 1)) > 32)
		*total = cost->shift_const + COSTS_N_INSNS (2);
	      else
		*total = cost->shift_const * 2;
	    }
	  else
	    {
	      if (GET_CODE (XEXP (x, 1)) == AND)
		*total = cost->shift_var * 2;
	      else
		*total = cost->shift_var * 6 + COSTS_N_INSNS (2);
	    }
	}
      else
	{
	  if (CONST_INT_P (XEXP (x, 1)))
	    *total = cost->shift_const;
	  else
	    *total = cost->shift_var;
	}
      break;

    case FMA:
      {
	rtx sub;

	gcc_assert (FLOAT_MODE_P (mode));
	gcc_assert (TARGET_FMA || TARGET_FMA4);

	/* ??? SSE scalar/vector cost should be used here.  */
	/* ??? Bald assumption that fma has the same cost as fmul.  */
	*total = cost->fmul;
	*total += rtx_cost (XEXP (x, 1), FMA, 1, speed);

	/* Negate in op0 or op2 is free: FMS, FNMA, FNMS.  */
	sub = XEXP (x, 0);
	if (GET_CODE (sub) == NEG)
	  sub = XEXP (sub, 0);
	*total += rtx_cost (sub, FMA, 0, speed);

	sub = XEXP (x, 2);
	if (GET_CODE (sub) == NEG)
	  sub = XEXP (sub, 0);
	*total += rtx_cost (sub, FMA, 2, speed);
	return true;
      }

    case MULT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	{
	  /* ??? SSE scalar cost should be used here.  */
	  *total = cost->fmul;
	  return false;
	}
      else if (X87_FLOAT_MODE_P (mode))
	{
	  *total = cost->fmul;
	  return false;
	}
      else if (FLOAT_MODE_P (mode))
	{
	  /* ??? SSE vector cost should be used here.  */
	  *total = cost->fmul;
	  return false;
	}
      else if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  /* V*QImode is emulated with 7-13 insns.  */
	  if (mode == V16QImode || mode == V32QImode)
	    {
	      int extra = 11;
	      if (TARGET_XOP && mode == V16QImode)
		extra = 5;
	      else if (TARGET_SSSE3)
		extra = 6;
	      *total = cost->fmul * 2 + cost->fabs * extra;
	    }
	  /* V*DImode is emulated with 5-8 insns.  */
	  else if (mode == V2DImode || mode == V4DImode)
	    {
	      if (TARGET_XOP && mode == V2DImode)
		*total = cost->fmul * 2 + cost->fabs * 3;
	      else
		*total = cost->fmul * 3 + cost->fabs * 5;
	    }
	  /* Without sse4.1, we don't have PMULLD; it's emulated with 7
	     insns, including two PMULUDQ.  */
	  else if (mode == V4SImode && !(TARGET_SSE4_1 || TARGET_AVX))
	    *total = cost->fmul * 2 + cost->fabs * 5;
	  else
	    *total = cost->fmul;
	  return false;
	}
      else
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  int nbits;

	  if (CONST_INT_P (XEXP (x, 1)))
	    {
	      unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
	      for (nbits = 0; value != 0; value &= value - 1)
		nbits++;
	    }
	  else
	    /* This is arbitrary.  */
	    nbits = 7;

	  /* Compute costs correctly for widening multiplication.  */
	  if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	      && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
		 == GET_MODE_SIZE (mode))
	    {
	      int is_mulwiden = 0;
	      enum machine_mode inner_mode = GET_MODE (op0);

	      if (GET_CODE (op0) == GET_CODE (op1))
		is_mulwiden = 1, op1 = XEXP (op1, 0);
	      else if (CONST_INT_P (op1))
		{
		  if (GET_CODE (op0) == SIGN_EXTEND)
		    is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
				  == INTVAL (op1);
		  else
		    is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
		}

	      if (is_mulwiden)
		op0 = XEXP (op0, 0), mode = GET_MODE (op0);
	    }

	  *total = (cost->mult_init[MODE_INDEX (mode)]
		    + nbits * cost->mult_bit
		    + rtx_cost (op0, outer_code, opno, speed)
		    + rtx_cost (op1, outer_code, opno, speed));

	  return true;
	}

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	/* ??? SSE cost should be used here.  */
	*total = cost->fdiv;
      else if (X87_FLOAT_MODE_P (mode))
	*total = cost->fdiv;
      else if (FLOAT_MODE_P (mode))
	/* ??? SSE vector cost should be used here.  */
	*total = cost->fdiv;
      else
	*total = cost->divide[MODE_INDEX (mode)];
      break;

    case PLUS:
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  if (GET_CODE (XEXP (x, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	      && CONSTANT_P (XEXP (x, 1)))
	    {
	      HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
	      if (val == 2 || val == 4 || val == 8)
		{
		  *total = cost->lea;
		  *total += rtx_cost (XEXP (XEXP (x, 0), 1),
				      outer_code, opno, speed);
		  *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
				      outer_code, opno, speed);
		  *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
		  return true;
		}
	    }
	  else if (GET_CODE (XEXP (x, 0)) == MULT
		   && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
	    {
	      HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
	      if (val == 2 || val == 4 || val == 8)
		{
		  *total = cost->lea;
		  *total += rtx_cost (XEXP (XEXP (x, 0), 0),
				      outer_code, opno, speed);
		  *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
		  return true;
		}
	    }
	  else if (GET_CODE (XEXP (x, 0)) == PLUS)
	    {
	      *total = cost->lea;
	      *total += rtx_cost (XEXP (XEXP (x, 0), 0),
				  outer_code, opno, speed);
	      *total += rtx_cost (XEXP (XEXP (x, 0), 1),
				  outer_code, opno, speed);
	      *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
	      return true;
	    }
	}
      /* FALLTHRU */

    case MINUS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	{
	  /* ??? SSE cost should be used here.  */
	  *total = cost->fadd;
	  return false;
	}
      else if (X87_FLOAT_MODE_P (mode))
	{
	  *total = cost->fadd;
	  return false;
	}
      else if (FLOAT_MODE_P (mode))
	{
	  /* ??? SSE vector cost should be used here.  */
	  *total = cost->fadd;
	  return false;
	}
      /* FALLTHRU */

    case AND:
    case IOR:
    case XOR:
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	{
	  *total = (cost->add * 2
		    + (rtx_cost (XEXP (x, 0), outer_code, opno, speed)
		       << (GET_MODE (XEXP (x, 0)) != DImode))
		    + (rtx_cost (XEXP (x, 1), outer_code, opno, speed)
		       << (GET_MODE (XEXP (x, 1)) != DImode)));
	  return true;
	}
      /* FALLTHRU */

    case NEG:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	{
	  /* ??? SSE cost should be used here.  */
	  *total = cost->fchs;
	  return false;
	}
      else if (X87_FLOAT_MODE_P (mode))
	{
	  *total = cost->fchs;
	  return false;
	}
      else if (FLOAT_MODE_P (mode))
	{
	  /* ??? SSE vector cost should be used here.  */
	  *total = cost->fchs;
	  return false;
	}
      /* FALLTHRU */

    case NOT:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  /* ??? Should be SSE vector operation cost.  */
	  /* At least for published AMD latencies, this really is the same
	     as the latency for a simple fpu operation like fabs.  */
	  *total = cost->fabs;
	}
      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	*total = cost->add * 2;
      else
	*total = cost->add;
      break;

    case COMPARE:
      if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
	  && XEXP (XEXP (x, 0), 1) == const1_rtx
	  && CONST_INT_P (XEXP (XEXP (x, 0), 2))
	  && XEXP (x, 1) == const0_rtx)
	{
	  /* This kind of construct is implemented using test[bwl].
	     Treat it as if we had an AND.  */
	  *total = (cost->add
		    + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, opno, speed)
		    + rtx_cost (const1_rtx, outer_code, opno, speed));
	  return true;
	}
      break;

    case FLOAT_EXTEND:
      if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
	*total = 0;
      break;

    case ABS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	/* ??? SSE cost should be used here.  */
	*total = cost->fabs;
      else if (X87_FLOAT_MODE_P (mode))
	*total = cost->fabs;
      else if (FLOAT_MODE_P (mode))
	/* ??? SSE vector cost should be used here.  */
	*total = cost->fabs;
      break;

    case SQRT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	/* ??? SSE cost should be used here.  */
	*total = cost->fsqrt;
      else if (X87_FLOAT_MODE_P (mode))
	*total = cost->fsqrt;
      else if (FLOAT_MODE_P (mode))
	/* ??? SSE vector cost should be used here.  */
	*total = cost->fsqrt;
      break;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_TP)
	*total = 0;
      return false;

    case VEC_SELECT:
    case VEC_CONCAT:
    case VEC_MERGE:
    case VEC_DUPLICATE:
      /* ??? Assume all of these vector manipulation patterns are
	 recognizable.  In which case they all pretty much have the
	 same cost.  */
      *total = cost->fabs;
      return true;

    default:
      break;
    }

  return false;
}
#if TARGET_MACHO

static int current_machopic_label_num;

/* Given a symbol name and its associated stub, write out the
   definition of the stub.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *binder_name, *symbol_name, lazy_ptr_name[32];
  int label = ++current_machopic_label_num;

  /* For 64-bit we shouldn't get here.  */
  gcc_assert (!TARGET_64BIT);

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = targetm.strip_name_encoding (symb);

  length = strlen (stub);
  binder_name = XALLOCAVEC (char, length + 32);
  GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  sprintf (lazy_ptr_name, "L%d$lz", label);

  if (MACHOPIC_ATT_STUB)
    switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
  else if (MACHOPIC_PURE)
    switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub_section]);

  fprintf (file, "%s:\n", stub);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

  if (MACHOPIC_ATT_STUB)
    {
      fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
    }
  else if (MACHOPIC_PURE)
    {
      /* PIC stub.  */
      /* 25-byte PIC stub using "CALL get_pc_thunk".  */
      rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
      output_set_got (tmp, NULL_RTX);	/* "CALL ___<cpu>.get_pc_thunk.cx".  */
      fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n",
	       label, lazy_ptr_name, label);
      fprintf (file, "\tjmp\t*%%ecx\n");
    }
  else
    fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);

  /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
     it needs no stub-binding-helper.  */
  if (MACHOPIC_ATT_STUB)
    ;
  else
    {
      fprintf (file, "%s:\n", binder_name);

      if (MACHOPIC_PURE)
	{
	  fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
	  fprintf (file, "\tpushl\t%%ecx\n");
	}
      else
	fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);

      fputs ("\tjmp\tdyld_stub_binding_helper\n", file);

      /* N.B. Keep the correspondence of these
	 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
	 old-pic/new-pic/non-pic stubs; altering this will break
	 compatibility with existing dylibs.  */
      if (MACHOPIC_PURE)
	{
	  /* 25-byte PIC stub using "CALL get_pc_thunk".  */
	  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
	}
      else
	/* 16-byte -mdynamic-no-pic stub.  */
	switch_to_section (darwin_sections[machopic_lazy_symbol_ptr3_section]);

      fprintf (file, "%s:\n", lazy_ptr_name);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
      fprintf (file, ASM_LONG "%s\n", binder_name);
    }
}
#endif /* TARGET_MACHO */
/* Order the registers for register allocator.  */

void
x86_order_regs_for_local_alloc (void)
{
   int pos = 0;
   int i;

   /* First allocate the local general purpose registers.  */
   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
     if (GENERAL_REGNO_P (i) && call_used_regs[i])
       reg_alloc_order [pos++] = i;

   /* Global general purpose registers.  */
   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
     if (GENERAL_REGNO_P (i) && !call_used_regs[i])
       reg_alloc_order [pos++] = i;

   /* x87 registers come first in case we are doing FP math
      using them.  */
   if (!TARGET_SSE_MATH)
     for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
       reg_alloc_order [pos++] = i;

   /* SSE registers.  */
   for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
     reg_alloc_order [pos++] = i;
   for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
     reg_alloc_order [pos++] = i;

   /* x87 registers.  */
   if (TARGET_SSE_MATH)
     for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
       reg_alloc_order [pos++] = i;

   for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
     reg_alloc_order [pos++] = i;

   /* Initialize the rest of array as we do not allocate some registers
      at all.  */
   while (pos < FIRST_PSEUDO_REGISTER)
     reg_alloc_order [pos++] = 0;
}
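/* The net effect is an allocation order of: call-clobbered GPRs first
   (no save/restore cost), then call-saved GPRs, then whichever FP bank
   matches the selected math unit, with MMX registers last.  */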
/* Handle a "callee_pop_aggregate_return" attribute; arguments as
   in struct attribute_spec handler.  */
static tree
ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
					 tree args,
					 int flags ATTRIBUTE_UNUSED,
					 bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }
  if (TARGET_64BIT)
    {
      warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }
  if (is_attribute_p ("callee_pop_aggregate_return", name))
    {
      tree cst;

      cst = TREE_VALUE (args);
      if (TREE_CODE (cst) != INTEGER_CST)
	{
	  warning (OPT_Wattributes,
		   "%qE attribute requires an integer constant argument",
		   name);
	  *no_add_attrs = true;
	}
      else if (compare_tree_int (cst, 0) != 0
	       && compare_tree_int (cst, 1) != 0)
	{
	  warning (OPT_Wattributes,
		   "argument to %qE attribute is neither zero, nor one",
		   name);
	  *no_add_attrs = true;
	}
    }

  return NULL_TREE;
}
/* Handle a "ms_abi" or "sysv" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
ix86_handle_abi_attribute (tree *node, tree name,
			   tree args ATTRIBUTE_UNUSED,
			   int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  /* Can combine regparm with all attributes but fastcall.  */
  if (is_attribute_p ("ms_abi", name))
    {
      if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
	{
	  error ("ms_abi and sysv_abi attributes are not compatible");
	}

      return NULL_TREE;
    }
  else if (is_attribute_p ("sysv_abi", name))
    {
      if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
	{
	  error ("ms_abi and sysv_abi attributes are not compatible");
	}

      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
ix86_handle_struct_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && RECORD_OR_UNION_TYPE_P (*type)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static tree
ix86_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
static bool
ix86_ms_bitfield_layout_p (const_tree record_type)
{
  return ((TARGET_MS_BITFIELD_LAYOUT
	   && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
	  || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
}
/* Returns an expression indicating where the this parameter is
   located on entry to the FUNCTION.  */

static rtx
x86_this_parameter (tree function)
{
  tree type = TREE_TYPE (function);
  bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
  int nregs;

  if (TARGET_64BIT)
    {
      const int *parm_regs;

      if (ix86_function_type_abi (type) == MS_ABI)
	parm_regs = x86_64_ms_abi_int_parameter_registers;
      else
	parm_regs = x86_64_int_parameter_registers;
      return gen_rtx_REG (Pmode, parm_regs[aggr]);
    }

  nregs = ix86_function_regparm (type, function);

  if (nregs > 0 && !stdarg_p (type))
    {
      int regno;
      unsigned int ccvt = ix86_get_callcvt (type);

      if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	regno = aggr ? DX_REG : CX_REG;
      else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	{
	  regno = CX_REG;
	  if (aggr)
	    return gen_rtx_MEM (SImode,
				plus_constant (Pmode, stack_pointer_rtx, 4));
	}
      else
	{
	  regno = AX_REG;
	  if (aggr)
	    {
	      regno = DX_REG;
	      if (nregs == 1)
		return gen_rtx_MEM (SImode,
				    plus_constant (Pmode,
						   stack_pointer_rtx, 4));
	    }
	}
      return gen_rtx_REG (SImode, regno);
    }

  return gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx,
					     aggr ? 8 : 4));
}
/* Determine whether x86_output_mi_thunk can succeed.  */

static bool
x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT vcall_offset, const_tree function)
{
  /* 64-bit can handle anything.  */
  if (TARGET_64BIT)
    return true;

  /* For 32-bit, everything's fine if we have one free register.  */
  if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
    return true;

  /* Need a free register for vcall_offset.  */
  if (vcall_offset)
    return false;

  /* Need a free register for GOT references.  */
  if (flag_pic && !targetm.binds_local_p (function))
    return false;

  /* Otherwise ok.  */
  return true;
}
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
x86_output_mi_thunk (FILE *file,
		     tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset, tree function)
{
  rtx this_param = x86_this_parameter (function);
  rtx this_reg, tmp, fnaddr;
  unsigned int tmp_regno;

  if (TARGET_64BIT)
    tmp_regno = R10_REG;
  else
    {
      unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (function));
      if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	tmp_regno = AX_REG;
      else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	tmp_regno = DX_REG;
      else
	tmp_regno = CX_REG;
    }

  emit_note (NOTE_INSN_PROLOGUE_END);

  /* If VCALL_OFFSET, we'll need THIS in a register.  Might as well
     pull it in now and let DELTA benefit.  */
  if (REG_P (this_param))
    this_reg = this_param;
  else if (vcall_offset)
    {
      /* Put the this parameter into %eax.  */
      this_reg = gen_rtx_REG (Pmode, AX_REG);
      emit_move_insn (this_reg, this_param);
    }
  else
    this_reg = NULL_RTX;

  /* Adjust the this parameter by a fixed constant.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);
      rtx delta_dst = this_reg ? this_reg : this_param;

      if (TARGET_64BIT)
	{
	  if (!x86_64_general_operand (delta_rtx, Pmode))
	    {
	      tmp = gen_rtx_REG (Pmode, tmp_regno);
	      emit_move_insn (tmp, delta_rtx);
	      delta_rtx = tmp;
	    }
	}

      ix86_emit_binop (PLUS, Pmode, delta_dst, delta_rtx);
    }

  /* Adjust the this parameter by a value stored in the vtable.  */
  if (vcall_offset)
    {
      rtx vcall_addr, vcall_mem, this_mem;

      tmp = gen_rtx_REG (Pmode, tmp_regno);

      this_mem = gen_rtx_MEM (ptr_mode, this_reg);
      if (Pmode != ptr_mode)
	this_mem = gen_rtx_ZERO_EXTEND (Pmode, this_mem);
      emit_move_insn (tmp, this_mem);

      /* Adjust the this parameter.  */
      vcall_addr = plus_constant (Pmode, tmp, vcall_offset);
      if (TARGET_64BIT
	  && !ix86_legitimate_address_p (ptr_mode, vcall_addr, true))
	{
	  rtx tmp2 = gen_rtx_REG (Pmode, R11_REG);
	  emit_move_insn (tmp2, GEN_INT (vcall_offset));
	  vcall_addr = gen_rtx_PLUS (Pmode, tmp, tmp2);
	}

      vcall_mem = gen_rtx_MEM (ptr_mode, vcall_addr);
      if (Pmode != ptr_mode)
	emit_insn (gen_addsi_1_zext (this_reg,
				     gen_rtx_REG (ptr_mode,
						  REGNO (this_reg)),
				     vcall_mem));
      else
	ix86_emit_binop (PLUS, Pmode, this_reg, vcall_mem);
    }

  /* If necessary, drop THIS back to its stack slot.  */
  if (this_reg && this_reg != this_param)
    emit_move_insn (this_param, this_reg);

  fnaddr = XEXP (DECL_RTL (function), 0);
  if (TARGET_64BIT)
    {
      if (!flag_pic || targetm.binds_local_p (function)
	  || cfun->machine->call_abi == MS_ABI)
	;
      else
	{
	  tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOTPCREL);
	  tmp = gen_rtx_CONST (Pmode, tmp);
	  fnaddr = gen_rtx_MEM (Pmode, tmp);
	}
    }
  else
    {
      if (!flag_pic || targetm.binds_local_p (function))
	;
#if TARGET_MACHO
      else if (TARGET_MACHO)
	{
	  fnaddr = machopic_indirect_call_target (DECL_RTL (function));
	  fnaddr = XEXP (fnaddr, 0);
	}
#endif /* TARGET_MACHO */
      else
	{
	  tmp = gen_rtx_REG (Pmode, CX_REG);
	  output_set_got (tmp, NULL_RTX);

	  fnaddr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOT);
	  fnaddr = gen_rtx_PLUS (Pmode, fnaddr, tmp);
	  fnaddr = gen_rtx_MEM (Pmode, fnaddr);
	}
    }

  /* Our sibling call patterns do not allow memories, because we have no
     predicate that can distinguish between frame and non-frame memory.
     For our purposes here, we can get away with (ab)using a jump pattern,
     because we're going to do no optimization.  */
  if (MEM_P (fnaddr))
    emit_jump_insn (gen_indirect_jump (fnaddr));
  else
    {
      if (ix86_cmodel == CM_LARGE_PIC && SYMBOLIC_CONST (fnaddr))
	fnaddr = legitimize_pic_address (fnaddr,
					 gen_rtx_REG (Pmode, tmp_regno));

      if (!sibcall_insn_operand (fnaddr, word_mode))
	{
	  tmp = gen_rtx_REG (word_mode, tmp_regno);
	  if (GET_MODE (fnaddr) != word_mode)
	    fnaddr = gen_rtx_ZERO_EXTEND (word_mode, fnaddr);
	  emit_move_insn (tmp, fnaddr);
	  fnaddr = tmp;
	}

      tmp = gen_rtx_MEM (QImode, fnaddr);
      tmp = gen_rtx_CALL (VOIDmode, tmp, const0_rtx);
      tmp = emit_call_insn (tmp);
      SIBLING_CALL_P (tmp) = 1;
    }
  emit_barrier ();

  /* Emit just enough of rest_of_compilation to get the insns emitted.
     Note that use_thunk calls assemble_start_function et al.  */
  tmp = get_insns ();
  shorten_branches (tmp);
  final_start_function (tmp, file, 1);
  final (tmp, file, 1);
  final_end_function ();
}
static void
x86_file_start (void)
{
  default_file_start ();
#if TARGET_MACHO
  darwin_file_start ();
#endif
  if (X86_FILE_START_VERSION_DIRECTIVE)
    fputs ("\t.version\t\"01.01\"\n", asm_out_file);
  if (X86_FILE_START_FLTUSED)
    fputs ("\t.global\t__fltused\n", asm_out_file);
  if (ix86_asm_dialect == ASM_INTEL)
    fputs ("\t.intel_syntax noprefix\n", asm_out_file);
}
int
x86_field_alignment (tree field, int computed)
{
  enum machine_mode mode;
  tree type = TREE_TYPE (field);

  if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
    return computed;
  mode = TYPE_MODE (strip_array_types (type));
  if (mode == DFmode || mode == DCmode
      || GET_MODE_CLASS (mode) == MODE_INT
      || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
    return MIN (32, computed);
  return computed;
}
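/* Example of the effect: on 32-bit without -malign-double, a double field
   whose natural alignment would be 64 bits is capped at MIN (32, computed),
   i.e. 4-byte alignment, which matches the traditional System V i386
   layout of structs containing doubles.  */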
/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */
void
x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
{
  const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
					 : MCOUNT_NAME);

  if (TARGET_64BIT)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
#endif

      if (DEFAULT_ABI == SYSV_ABI && flag_pic)
	fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
      else
	fprintf (file, "\tcall\t%s\n", mcount_name);
    }
  else if (flag_pic)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
	       LPREFIX, labelno);
#endif
      fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
    }
  else
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
	       LPREFIX, labelno);
#endif
      fprintf (file, "\tcall\t%s\n", mcount_name);
    }
}
/* We don't have exact information about the insn sizes, but we may assume
   quite safely that we are informed about all 1 byte insns and memory
   address sizes.  This is enough to eliminate unnecessary padding in
   99% of cases.  */

static int
min_insn_size (rtx insn)
{
  int l = 0, len;

  if (!INSN_P (insn) || !active_insn_p (insn))
    return 0;

  /* Discard alignments we've emitted and jump instructions.  */
  if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
      && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
    return 0;
  if (JUMP_TABLE_DATA_P (insn))
    return 0;

  /* Important case - calls are always 5 bytes.
     It is common to have many calls in the row.  */
  if (CALL_P (insn)
      && symbolic_reference_mentioned_p (PATTERN (insn))
      && !SIBLING_CALL_P (insn))
    return 5;
  len = get_attr_length (insn);
  if (len <= 1)
    return 1;

  /* For normal instructions we rely on get_attr_length being exact,
     with a few exceptions.  */
  if (!JUMP_P (insn))
    {
      enum attr_type type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_MULTI:
	  if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	      || asm_noperands (PATTERN (insn)) >= 0)
	    return 0;
	  break;
	case TYPE_OTHER:
	case TYPE_FCMP:
	  break;
	default:
	  /* Otherwise trust get_attr_length.  */
	  return len;
	}

      l = get_attr_length_address (insn);
      if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
	l = 4;
    }
  if (l)
    return 1+l;
  else
    return 2;
}
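/* These sizes are deliberately lower bounds; the K8 padding pass below
   reasons about the worst case of instructions packed as densely as
   possible, and an overestimate here could cause it to miss a 16-byte
   window that four jumps can in fact share.  */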
#ifdef ASM_OUTPUT_MAX_SKIP_PAD
/* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
   window.  */

static void
ix86_avoid_jump_mispredicts (void)
{
  rtx insn, start = get_insns ();
  int nbytes = 0, njumps = 0;
  int isjump = 0;

  /* Look for all minimal intervals of instructions containing 4 jumps.
     The intervals are bounded by START and INSN.  NBYTES is the total
     size of instructions in the interval including INSN and not including
     START.  When the NBYTES is smaller than 16 bytes, it is possible
     that the end of START and INSN ends up in the same 16byte page.

     The smallest offset in the page INSN can start is the case where START
     ends on the offset 0.  Offset of INSN is then NBYTES - sizeof (INSN).
     We add p2align to 16byte window with maxskip 15 - NBYTES + sizeof (INSN).
     */
  for (insn = start; insn; insn = NEXT_INSN (insn))
    {
      int min_size;

      if (LABEL_P (insn))
	{
	  int align = label_to_alignment (insn);
	  int max_skip = label_to_max_skip (insn);

	  if (max_skip > 15)
	    max_skip = 15;
	  /* If align > 3, only up to 16 - max_skip - 1 bytes can be
	     already in the current 16 byte page, because otherwise
	     ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
	     bytes to reach 16 byte boundary.  */
	  if (align <= 0
	      || (align <= 3 && max_skip != (1 << align) - 1))
	    max_skip = 0;
	  if (dump_file)
	    fprintf (dump_file, "Label %i with max_skip %i\n",
		     INSN_UID (insn), max_skip);
	  if (max_skip)
	    {
	      while (nbytes + max_skip >= 16)
		{
		  start = NEXT_INSN (start);
		  if ((JUMP_P (start)
		       && GET_CODE (PATTERN (start)) != ADDR_VEC
		       && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
		      || CALL_P (start))
		    njumps--, isjump = 1;
		  else
		    isjump = 0;
		  nbytes -= min_insn_size (start);
		}
	    }
	  continue;
	}

      min_size = min_insn_size (insn);
      nbytes += min_size;
      if (dump_file)
	fprintf (dump_file, "Insn %i estimated to %i bytes\n",
		 INSN_UID (insn), min_size);
      if ((JUMP_P (insn)
	   && GET_CODE (PATTERN (insn)) != ADDR_VEC
	   && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
	  || CALL_P (insn))
	njumps++;
      else
	continue;

      while (njumps > 3)
	{
	  start = NEXT_INSN (start);
	  if ((JUMP_P (start)
	       && GET_CODE (PATTERN (start)) != ADDR_VEC
	       && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
	      || CALL_P (start))
	    njumps--, isjump = 1;
	  else
	    isjump = 0;
	  nbytes -= min_insn_size (start);
	}
      gcc_assert (njumps >= 0);
      if (dump_file)
	fprintf (dump_file, "Interval %i to %i has %i bytes\n",
		 INSN_UID (start), INSN_UID (insn), nbytes);

      if (njumps == 3 && isjump && nbytes < 16)
	{
	  int padsize = 15 - nbytes + min_insn_size (insn);

	  if (dump_file)
	    fprintf (dump_file, "Padding insn %i by %i bytes!\n",
		     INSN_UID (insn), padsize);
	  emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
	}
    }
}
#endif
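/* Rough trace of the window logic above: START..INSN is shrunk from the
   left while it holds more than 3 jumps or calls; if exactly 3 remain,
   the instruction just evicted was itself a jump, and the minimal sizes
   still fit in under 16 bytes, then all four could land in one 16-byte
   fetch window, so padding of 15 - NBYTES + sizeof (INSN) bytes is
   emitted before INSN.  */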
/* AMD Athlon works faster
   when RET is not destination of conditional jump or directly preceded
   by other jump instruction.  We avoid the penalty by inserting NOP just
   before the RET instructions in such cases.  */
static void
ix86_pad_returns (void)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    {
      basic_block bb = e->src;
      rtx ret = BB_END (bb);
      rtx prev;
      bool replace = false;

      if (!JUMP_P (ret) || !ANY_RETURN_P (PATTERN (ret))
	  || optimize_bb_for_size_p (bb))
	continue;
      for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
	if (active_insn_p (prev) || LABEL_P (prev))
	  break;
      if (prev && LABEL_P (prev))
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->preds)
	    if (EDGE_FREQUENCY (e) && e->src->index >= 0
		&& !(e->flags & EDGE_FALLTHRU))
	      replace = true;
	}
      if (!replace)
	{
	  prev = prev_active_insn (ret);
	  if (prev
	      && ((JUMP_P (prev) && any_condjump_p (prev))
		  || CALL_P (prev)))
	    replace = true;
	  /* Empty functions get branch mispredict even when
	     the jump destination is not visible to us.  */
	  if (!prev && !optimize_function_for_size_p (cfun))
	    replace = true;
	}
      if (replace)
	{
	  emit_jump_insn_before (gen_simple_return_internal_long (), ret);
	  delete_insn (ret);
	}
    }
}
/* Count the minimum number of instructions in BB.  Return 4 if the
   number of instructions >= 4.  */

static int
ix86_count_insn_bb (basic_block bb)
{
  rtx insn;
  int insn_count = 0;

  /* Count number of instructions in this block.  Return 4 if the number
     of instructions >= 4.  */
  FOR_BB_INSNS (bb, insn)
    {
      /* Only happen in exit blocks.  */
      if (JUMP_P (insn)
	  && ANY_RETURN_P (PATTERN (insn)))
	break;

      if (NONDEBUG_INSN_P (insn)
	  && GET_CODE (PATTERN (insn)) != USE
	  && GET_CODE (PATTERN (insn)) != CLOBBER)
	{
	  insn_count++;
	  if (insn_count >= 4)
	    return insn_count;
	}
    }

  return insn_count;
}
/* Count the minimum number of instructions in code path in BB.
   Return 4 if the number of instructions >= 4.  */

static int
ix86_count_insn (basic_block bb)
{
  edge e;
  edge_iterator ei;
  int min_prev_count;

  /* Only bother counting instructions along paths with no
     more than 2 basic blocks between entry and exit.  Given
     that BB has an edge to exit, determine if a predecessor
     of BB has an edge from entry.  If so, compute the number
     of instructions in the predecessor block.  If there
     happen to be multiple such blocks, compute the minimum.  */
  min_prev_count = 4;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      edge prev_e;
      edge_iterator prev_ei;

      if (e->src == ENTRY_BLOCK_PTR)
	{
	  min_prev_count = 0;
	  break;
	}
      FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
	{
	  if (prev_e->src == ENTRY_BLOCK_PTR)
	    {
	      int count = ix86_count_insn_bb (e->src);
	      if (count < min_prev_count)
		min_prev_count = count;
	      break;
	    }
	}
    }

  if (min_prev_count < 4)
    min_prev_count += ix86_count_insn_bb (bb);

  return min_prev_count;
}
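/* Together with ix86_count_insn_bb, the count saturates at 4 on purpose:
   ix86_pad_short_function only needs to know whether some entry-to-exit
   path executes fewer than 4 instructions, not the exact number.  */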
/* Pad short function to 4 instructions.   */

static void
ix86_pad_short_function (void)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    {
      rtx ret = BB_END (e->src);
      if (JUMP_P (ret) && ANY_RETURN_P (PATTERN (ret)))
	{
	  int insn_count = ix86_count_insn (e->src);

	  /* Pad short function.  */
	  if (insn_count < 4)
	    {
	      rtx insn = ret;

	      /* Find epilogue.  */
	      while (insn
		     && (!NOTE_P (insn)
			 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
		insn = PREV_INSN (insn);

	      if (!insn)
		insn = ret;

	      /* Two NOPs count as one instruction.  */
	      insn_count = 2 * (4 - insn_count);
	      emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
	    }
	}
    }
}
/* Implement machine specific optimizations.  We implement padding of returns
   for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window.  */
static void
ix86_reorg (void)
{
  /* We are freeing block_for_insn in the toplev to keep compatibility
     with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
  compute_bb_for_insn ();

  if (optimize && optimize_function_for_speed_p (cfun))
    {
      if (TARGET_PAD_SHORT_FUNCTION)
	ix86_pad_short_function ();
      else if (TARGET_PAD_RETURNS)
	ix86_pad_returns ();
#ifdef ASM_OUTPUT_MAX_SKIP_PAD
      if (TARGET_FOUR_JUMP_LIMIT)
	ix86_avoid_jump_mispredicts ();
#endif
    }
}
/* Return nonzero when QImode register that must be represented via REX prefix
   is used.  */
bool
x86_extended_QIreg_mentioned_p (rtx insn)
{
  int i;
  extract_insn_cached (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    if (GENERAL_REG_P (recog_data.operand[i])
	&& !QI_REGNO_P (REGNO (recog_data.operand[i])))
       return true;
  return false;
}

/* Return nonzero when P points to register encoded via REX prefix.
   Called via for_each_rtx.  */
static int
extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
{
   unsigned int regno;
   if (!REG_P (*p))
     return 0;
   regno = REGNO (*p);
   return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
}

/* Return true when INSN mentions register that must be encoded using REX
   prefix.  */
bool
x86_extended_reg_mentioned_p (rtx insn)
{
  return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
		       extended_reg_mentioned_1, NULL);
}
/* If profitable, negate (without causing overflow) integer constant
   of mode MODE at location LOC.  Return true in this case.  */
bool
x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
{
  HOST_WIDE_INT val;

  if (!CONST_INT_P (*loc))
    return false;

  switch (mode)
    {
    case DImode:
      /* DImode x86_64 constants must fit in 32 bits.  */
      gcc_assert (x86_64_immediate_operand (*loc, mode));

      mode = SImode;
      break;

    case SImode:
    case HImode:
    case QImode:
      break;

    default:
      gcc_unreachable ();
    }

  /* Avoid overflows.  */
  if (mode_signbit_p (mode, *loc))
    return false;

  val = INTVAL (*loc);

  /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
     Exceptions: -128 encodes smaller than 128, so swap sign and op.  */
  if ((val < 0 && val != -128)
      || val == 128)
    {
      *loc = GEN_INT (-val);
      return true;
    }

  return false;
}
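/* Worked example: (plus:SI (reg) (const_int -4)) becomes const_int 4, so
   the caller can emit "subl $4, %eax" instead of "addl $-4, %eax".  128 is
   also negated because -128 fits the sign-extended imm8 encoding, while
   -128 itself is never negated since +128 would not.  */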
/* Generate an unsigned DImode/SImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
x86_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode, inmode;

  inmode = GET_MODE (operands[1]);
  gcc_assert (inmode == SImode || inmode == DImode);

  out = operands[0];
  in = force_reg (inmode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);

  expand_float (out, in, 0);

  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
			    1, OPTAB_DIRECT);
  i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
			    1, OPTAB_DIRECT);
  i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);

  expand_float (f0, i0, 0);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
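/* The negative path is the usual halving trick: (in >> 1) | (in & 1)
   halves the value while folding the low bit into the rounding bit, the
   result is converted as a signed number, and f0 + f0 doubles it back, so
   the final result rounds as if the full unsigned value had been
   converted directly.  */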
/* AVX2 does support 32-byte integer vector operations,
   thus the longest vector we are faced with is V32QImode.  */
#define MAX_VECT_LEN	32

struct expand_vec_perm_d
{
  rtx target, op0, op1;
  unsigned char perm[MAX_VECT_LEN];
  enum machine_mode vmode;
  unsigned char nelt;
  bool one_operand_p;
  bool testing_p;
};

static bool canonicalize_perm (struct expand_vec_perm_d *d);
static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);

/* Get a vector mode of the same size as the original but with elements
   twice as wide.  This is only guaranteed to apply to integral vectors.  */

static inline enum machine_mode
get_mode_wider_vector (enum machine_mode o)
{
  /* ??? Rely on the ordering that genmodes.c gives to vectors.  */
  enum machine_mode n = GET_MODE_WIDER_MODE (o);
  gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
  gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
  return n;
}
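/* For example, V16QImode widens to V8HImode: the same 16-byte size with
   half as many elements, each twice as wide; the "widen" fallback in
   ix86_expand_vector_init_duplicate below relies on exactly this
   property.  */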
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   with all elements equal to VAR.  Return true if successful.  */

static bool
ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
				   rtx target, rtx val)
{
  bool ok;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
	return false;
      /* FALLTHRU */

    case V4DFmode:
    case V4DImode:
    case V8SFmode:
    case V8SImode:
    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      {
	rtx insn, dup;

	/* First attempt to recognize VAL as-is.  */
	dup = gen_rtx_VEC_DUPLICATE (mode, val);
	insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
	if (recog_memoized (insn) < 0)
	  {
	    rtx seq;
	    /* If that fails, force VAL into a register.  */

	    start_sequence ();
	    XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
	    seq = get_insns ();
	    end_sequence ();
	    if (seq)
	      emit_insn_before (seq, insn);

	    ok = recog_memoized (insn) >= 0;
	    gcc_assert (ok);
	  }
      }
      return true;

    case V4HImode:
      if (!mmx_ok)
	return false;
      if (TARGET_SSE || TARGET_3DNOW_A)
	{
	  rtx x;

	  val = gen_lowpart (SImode, val);
	  x = gen_rtx_TRUNCATE (HImode, val);
	  x = gen_rtx_VEC_DUPLICATE (mode, x);
	  emit_insn (gen_rtx_SET (VOIDmode, target, x));
	  return true;
	}
      goto widen;

    case V8QImode:
      if (!mmx_ok)
	return false;
      goto widen;

    case V8HImode:
      if (TARGET_SSE2)
	{
	  struct expand_vec_perm_d dperm;
	  rtx tmp1, tmp2;

	permute:
	  memset (&dperm, 0, sizeof (dperm));
	  dperm.target = target;
	  dperm.vmode = mode;
	  dperm.nelt = GET_MODE_NUNITS (mode);
	  dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
	  dperm.one_operand_p = true;

	  /* Extend to SImode using a paradoxical SUBREG.  */
	  tmp1 = gen_reg_rtx (SImode);
	  emit_move_insn (tmp1, gen_lowpart (SImode, val));

	  /* Insert the SImode value as low element of a V4SImode vector.  */
	  tmp2 = gen_lowpart (V4SImode, dperm.op0);
	  emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));

	  ok = (expand_vec_perm_1 (&dperm)
		|| expand_vec_perm_broadcast_1 (&dperm));
	  gcc_assert (ok);
	  return ok;
	}
      goto widen;

    case V16QImode:
      if (TARGET_SSE2)
	goto permute;
      goto widen;

    widen:
      /* Replicate the value once into the next wider mode and recurse.  */
      {
	enum machine_mode smode, wsmode, wvmode;
	rtx x;

	smode = GET_MODE_INNER (mode);
	wvmode = get_mode_wider_vector (mode);
	wsmode = GET_MODE_INNER (wvmode);

	val = convert_modes (wsmode, smode, val, true);
	x = expand_simple_binop (wsmode, ASHIFT, val,
				 GEN_INT (GET_MODE_BITSIZE (smode)),
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);
	val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);

	x = gen_lowpart (wvmode, target);
	ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
	gcc_assert (ok);
	return ok;
      }

    case V16HImode:
    case V32QImode:
      {
	enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
	rtx x = gen_reg_rtx (hvmode);

	ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
	gcc_assert (ok);

	x = gen_rtx_VEC_CONCAT (mode, x, x);
	emit_insn (gen_rtx_SET (VOIDmode, target, x));
      }
      return true;

    default:
      return false;
    }
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   whose ONE_VAR element is VAR, and other elements are zero.  Return true
   if successful.  */

static bool
ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
				     rtx target, rtx var, int one_var)
{
  enum machine_mode vsimode;
  rtx new_target;
  rtx x, tmp;
  bool use_vector_set = false;

  switch (mode)
    {
    case V2DImode:
      /* For SSE4.1, we normally use vector set.  But if the second
	 element is zero and inter-unit moves are OK, we use movq
	 instead.  */
      use_vector_set = (TARGET_64BIT
			&& TARGET_SSE4_1
			&& !(TARGET_INTER_UNIT_MOVES
			     && one_var == 0));
      break;
    case V16QImode:
    case V4SImode:
    case V4SFmode:
      use_vector_set = TARGET_SSE4_1;
      break;
    case V8HImode:
      use_vector_set = TARGET_SSE2;
      break;
    case V4HImode:
      use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
      break;
    case V32QImode:
    case V16HImode:
    case V8SImode:
    case V8SFmode:
    case V4DFmode:
      use_vector_set = TARGET_AVX;
      break;
    case V4DImode:
      /* Use ix86_expand_vector_set in 64bit mode only.  */
      use_vector_set = TARGET_AVX && TARGET_64BIT;
      break;
    default:
      break;
    }

  if (use_vector_set)
    {
      emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
      var = force_reg (GET_MODE_INNER (mode), var);
      ix86_expand_vector_set (mmx_ok, target, var, one_var);
      return true;
    }

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok)
	return false;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      if (one_var != 0)
	return false;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V4SFmode:
    case V4SImode:
      if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
	new_target = gen_reg_rtx (mode);
      else
	new_target = target;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_DUPLICATE (mode, var);
      x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
      if (one_var != 0)
	{
	  /* We need to shuffle the value to the correct position, so
	     create a new pseudo to store the intermediate result.  */

	  /* With SSE2, we can use the integer shuffle insns.  */
	  if (mode != V4SFmode && TARGET_SSE2)
	    {
	      emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
					    const1_rtx,
					    GEN_INT (one_var == 1 ? 0 : 1),
					    GEN_INT (one_var == 2 ? 0 : 1),
					    GEN_INT (one_var == 3 ? 0 : 1)));
	      if (target != new_target)
		emit_move_insn (target, new_target);
	      return true;
	    }

	  /* Otherwise convert the intermediate result to V4SFmode and
	     use the SSE1 shuffle instructions.  */
	  if (mode != V4SFmode)
	    {
	      tmp = gen_reg_rtx (V4SFmode);
	      emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
	    }
	  else
	    tmp = new_target;

	  emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
					  const1_rtx,
					  GEN_INT (one_var == 1 ? 0 : 1),
					  GEN_INT (one_var == 2 ? 0+4 : 1+4),
					  GEN_INT (one_var == 3 ? 0+4 : 1+4)));

	  if (mode != V4SFmode)
	    emit_move_insn (target, gen_lowpart (V4SImode, tmp));
	  else if (tmp != target)
	    emit_move_insn (target, tmp);
	}
      else if (target != new_target)
	emit_move_insn (target, new_target);
      return true;

    case V8HImode:
    case V16QImode:
      vsimode = V4SImode;
      goto widen;
    case V4HImode:
    case V8QImode:
      if (!mmx_ok)
	return false;
      vsimode = V2SImode;
      goto widen;
    widen:
      if (one_var != 0)
	return false;

      /* Zero extend the variable element to SImode and recurse.  */
      var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);

      x = gen_reg_rtx (vsimode);
      if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
						var, one_var))
	gcc_unreachable ();

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   consisting of the values in VALS.  It is known that all elements
   except ONE_VAR are constants.  Return true if successful.  */

static bool
ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
				 rtx target, rtx vals, int one_var)
{
  rtx var = XVECEXP (vals, 0, one_var);
  enum machine_mode wmode;
  rtx const_vec, x;

  const_vec = copy_rtx (vals);
  XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
  const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));

  switch (mode)
    {
    case V2DFmode:
    case V2DImode:
    case V2SFmode:
    case V2SImode:
      /* For the two element vectors, it's just as easy to use
	 the general case.  */
      return false;

    case V4DImode:
      /* Use ix86_expand_vector_set in 64bit mode only.  */
      if (!TARGET_64BIT)
	return false;
    case V4DFmode:
    case V8SFmode:
    case V8SImode:
    case V16HImode:
    case V32QImode:
    case V4SFmode:
    case V4SImode:
    case V8HImode:
    case V4HImode:
      break;

    case V16QImode:
      if (TARGET_SSE4_1)
	break;
      wmode = V8HImode;
      goto widen;
    case V8QImode:
      wmode = V4HImode;
      goto widen;
    widen:
      /* There's no way to set one QImode entry easily.  Combine
	 the variable value with its adjacent constant value, and
	 promote to an HImode set.  */
      x = XVECEXP (vals, 0, one_var ^ 1);
      if (one_var & 1)
	{
	  var = convert_modes (HImode, QImode, var, true);
	  var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);
	  x = GEN_INT (INTVAL (x) & 0xff);
	}
      else
	{
	  var = convert_modes (HImode, QImode, var, true);
	  x = gen_int_mode (INTVAL (x) << 8, HImode);
	}
      if (x != const0_rtx)
	var = expand_simple_binop (HImode, IOR, var, x, var,
				   1, OPTAB_LIB_WIDEN);

      x = gen_reg_rtx (wmode);
      emit_move_insn (x, gen_lowpart (wmode, const_vec));
      ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }

  emit_move_insn (target, const_vec);
  ix86_expand_vector_set (mmx_ok, target, var, one_var);
  return true;
}
/* A subroutine of ix86_expand_vector_init_general.  Use vector
   concatenate to handle the most general case: all values variable,
   and none identical.  */

static void
ix86_expand_vector_init_concat (enum machine_mode mode,
				rtx target, rtx *ops, int n)
{
  enum machine_mode cmode, hmode = VOIDmode;
  rtx first[8], second[4];
  rtvec v;
  int i, j;

  switch (n)
    {
    case 2:
      switch (mode)
	{
	case V8SImode: cmode = V4SImode; break;
	case V8SFmode: cmode = V4SFmode; break;
	case V4DImode: cmode = V2DImode; break;
	case V4DFmode: cmode = V2DFmode; break;
	case V4SImode: cmode = V2SImode; break;
	case V4SFmode: cmode = V2SFmode; break;
	case V2DImode: cmode = DImode; break;
	case V2SImode: cmode = SImode; break;
	case V2DFmode: cmode = DFmode; break;
	case V2SFmode: cmode = SFmode; break;
	default:
	  gcc_unreachable ();
	}

      if (!register_operand (ops[1], cmode))
	ops[1] = force_reg (cmode, ops[1]);
      if (!register_operand (ops[0], cmode))
	ops[0] = force_reg (cmode, ops[0]);
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_CONCAT (mode, ops[0],
						  ops[1])));
      break;

    case 4:
      switch (mode)
	{
	case V4DImode: cmode = V2DImode; break;
	case V4DFmode: cmode = V2DFmode; break;
	case V4SImode: cmode = V2SImode; break;
	case V4SFmode: cmode = V2SFmode; break;
	default:
	  gcc_unreachable ();
	}
      goto half;

    case 8:
      switch (mode)
	{
	case V8SImode: cmode = V2SImode; hmode = V4SImode; break;
	case V8SFmode: cmode = V2SFmode; hmode = V4SFmode; break;
	default:
	  gcc_unreachable ();
	}
      goto half;

half:
      /* FIXME: We process inputs backward to help RA.  PR 36222.  */
      i = n - 1;
      j = (n >> 1) - 1;
      for (; i > 0; i -= 2, j--)
	{
	  first[j] = gen_reg_rtx (cmode);
	  v = gen_rtvec (2, ops[i - 1], ops[i]);
	  ix86_expand_vector_init (false, first[j],
				   gen_rtx_PARALLEL (cmode, v));
	}

      n >>= 1;
      if (n > 2)
	{
	  gcc_assert (hmode != VOIDmode);
	  for (i = j = 0; i < n; i += 2, j++)
	    {
	      second[j] = gen_reg_rtx (hmode);
	      ix86_expand_vector_init_concat (hmode, second [j],
					      &first [i], 2);
	    }
	  n >>= 1;
	  ix86_expand_vector_init_concat (mode, target, second, n);
	}
      else
	ix86_expand_vector_init_concat (mode, target, first, n);
      break;

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_vector_init_general.  Use vector
   interleave to handle the most general case: all values variable,
   and none identical.  */

static void
ix86_expand_vector_init_interleave (enum machine_mode mode,
				    rtx target, rtx *ops, int n)
{
  enum machine_mode first_imode, second_imode, third_imode, inner_mode;
  int i, j;
  rtx op0, op1;
  rtx (*gen_load_even) (rtx, rtx, rtx);
  rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
  rtx (*gen_interleave_second_low) (rtx, rtx, rtx);

  switch (mode)
    {
    case V8HImode:
      gen_load_even = gen_vec_setv8hi;
      gen_interleave_first_low = gen_vec_interleave_lowv4si;
      gen_interleave_second_low = gen_vec_interleave_lowv2di;
      inner_mode = HImode;
      first_imode = V4SImode;
      second_imode = V2DImode;
      third_imode = VOIDmode;
      break;
    case V16QImode:
      gen_load_even = gen_vec_setv16qi;
      gen_interleave_first_low = gen_vec_interleave_lowv8hi;
      gen_interleave_second_low = gen_vec_interleave_lowv4si;
      inner_mode = QImode;
      first_imode = V8HImode;
      second_imode = V4SImode;
      third_imode = V2DImode;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < n; i++)
    {
      /* Extend the odd element to SImode using a paradoxical SUBREG.  */
      op0 = gen_reg_rtx (SImode);
      emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));

      /* Insert the SImode value as low element of V4SImode vector.  */
      op1 = gen_reg_rtx (V4SImode);
      op0 = gen_rtx_VEC_MERGE (V4SImode,
			       gen_rtx_VEC_DUPLICATE (V4SImode,
						      op0),
			       CONST0_RTX (V4SImode),
			       const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, op1, op0));

      /* Cast the V4SImode vector back to a vector in original mode.  */
      op0 = gen_reg_rtx (mode);
      emit_move_insn (op0, gen_lowpart (mode, op1));

      /* Load even elements into the second position.  */
      emit_insn (gen_load_even (op0,
				force_reg (inner_mode,
					   ops [i + i + 1]),
				const1_rtx));

      /* Cast vector to FIRST_IMODE vector.  */
      ops[i] = gen_reg_rtx (first_imode);
      emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
    }

  /* Interleave low FIRST_IMODE vectors.  */
  for (i = j = 0; i < n; i += 2, j++)
    {
      op0 = gen_reg_rtx (first_imode);
      emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));

      /* Cast FIRST_IMODE vector to SECOND_IMODE vector.  */
      ops[j] = gen_reg_rtx (second_imode);
      emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
    }

  /* Interleave low SECOND_IMODE vectors.  */
  switch (second_imode)
    {
    case V4SImode:
      for (i = j = 0; i < n / 2; i += 2, j++)
	{
	  op0 = gen_reg_rtx (second_imode);
	  emit_insn (gen_interleave_second_low (op0, ops[i],
						ops[i + 1]));

	  /* Cast the SECOND_IMODE vector to the THIRD_IMODE
	     vector.  */
	  ops[j] = gen_reg_rtx (third_imode);
	  emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
	}
      second_imode = V2DImode;
      gen_interleave_second_low = gen_vec_interleave_lowv2di;
      /* FALLTHRU */

    case V2DImode:
      op0 = gen_reg_rtx (second_imode);
      emit_insn (gen_interleave_second_low (op0, ops[0],
					    ops[1]));

      /* Cast the SECOND_IMODE vector back to a vector on original
	 mode.  */
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_lowpart (mode, op0)));
      break;

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_vector_init.  Handle the most general case:
   all values variable, and none identical.  */

static void
ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
                                 rtx target, rtx vals)
{
  rtx ops[32], op0, op1;
  enum machine_mode half_mode = VOIDmode;
  int n, i;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok && !TARGET_SSE)
        break;
      /* FALLTHRU */

    case V8SFmode:
    case V8SImode:
    case V4DFmode:
    case V4DImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
        ops[i] = XVECEXP (vals, 0, i);
      ix86_expand_vector_init_concat (mode, target, ops, n);
      return;

    case V32QImode:
      half_mode = V16QImode;
      goto half;

    case V16HImode:
      half_mode = V8HImode;
      goto half;

half:
      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
        ops[i] = XVECEXP (vals, 0, i);
      op0 = gen_reg_rtx (half_mode);
      op1 = gen_reg_rtx (half_mode);
      ix86_expand_vector_init_interleave (half_mode, op0, ops,
                                          n >> 2);
      ix86_expand_vector_init_interleave (half_mode, op1,
                                          &ops [n >> 1], n >> 2);
      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_VEC_CONCAT (mode, op0, op1)));
      return;

    case V16QImode:
      if (!TARGET_SSE4_1)
        break;
      /* FALLTHRU */

    case V8HImode:
      if (!TARGET_SSE2)
        break;

      /* Don't use ix86_expand_vector_init_interleave if we can't
         move from GPR to SSE register directly.  */
      if (!TARGET_INTER_UNIT_MOVES)
        break;

      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
        ops[i] = XVECEXP (vals, 0, i);
      ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
      return;

    case V4HImode:
    case V8QImode:
      break;

    default:
      gcc_unreachable ();
    }

    {
      int i, j, n_elts, n_words, n_elt_per_word;
      enum machine_mode inner_mode;
      rtx words[4], shift;

      inner_mode = GET_MODE_INNER (mode);
      n_elts = GET_MODE_NUNITS (mode);
      n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      n_elt_per_word = n_elts / n_words;
      shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));

      for (i = 0; i < n_words; ++i)
        {
          rtx word = NULL_RTX;

          for (j = 0; j < n_elt_per_word; ++j)
            {
              rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
              elt = convert_modes (word_mode, inner_mode, elt, true);

              if (j == 0)
                word = elt;
              else
                {
                  word = expand_simple_binop (word_mode, ASHIFT, word, shift,
                                              word, 1, OPTAB_LIB_WIDEN);
                  word = expand_simple_binop (word_mode, IOR, word, elt,
                                              word, 1, OPTAB_LIB_WIDEN);
                }
            }

          words[i] = word;
        }

      if (n_words == 1)
        emit_move_insn (target, gen_lowpart (mode, words[0]));
      else if (n_words == 2)
        {
          rtx tmp = gen_reg_rtx (mode);
          emit_clobber (tmp);
          emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
          emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
          emit_move_insn (target, tmp);
        }
      else if (n_words == 4)
        {
          rtx tmp = gen_reg_rtx (V4SImode);
          gcc_assert (word_mode == SImode);
          vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
          ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
          emit_move_insn (target, gen_lowpart (mode, tmp));
        }
      else
        gcc_unreachable ();
    }
}
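
/* For concreteness, consider the generic word-building fallback above
   for V4HImode on a 32-bit target: n_words == 2 and n_elt_per_word == 2.
   The inner loop visits each word's elements highest-first, so word 0
   becomes (elt1 << 16) | elt0 and word 1 becomes (elt3 << 16) | elt2;
   the words are then moved into the low and high halves of a clobbered
   temporary and copied to TARGET.  */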
/* Initialize vector TARGET via VALS.  Suppress the use of MMX
   instructions unless MMX_OK is true.  */

void
ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  int i;
  rtx x;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
            || GET_CODE (x) == CONST_DOUBLE
            || GET_CODE (x) == CONST_FIXED))
        n_var++, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
        all_const_zero = false;
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  /* Constants are best loaded from the constant pool.  */
  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  /* If all values are identical, broadcast the value.  */
  if (all_same
      && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
                                            XVECEXP (vals, 0, 0)))
    return;

  /* Values where only one field is non-constant are best loaded from
     the pool and overwritten via move later.  */
  if (n_var == 1)
    {
      if (all_const_zero
          && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
                                                  XVECEXP (vals, 0, one_var),
                                                  one_var))
        return;

      if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
        return;
    }

  ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
}
void
ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  enum machine_mode half_mode;
  bool use_vec_merge = false;
  rtx tmp;
  static rtx (*gen_extract[6][2]) (rtx, rtx)
    = {
        { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
        { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
        { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
        { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
        { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
        { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
      };
  static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
    = {
        { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
        { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
        { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
        { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
        { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
        { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
      };
  int i, j, n;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (mmx_ok)
        {
          tmp = gen_reg_rtx (GET_MODE_INNER (mode));
          ix86_expand_vector_extract (true, tmp, target, 1 - elt);
          if (elt == 0)
            tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
          else
            tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
          emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
          return;
        }
      break;

    case V2DImode:
      use_vec_merge = TARGET_SSE4_1 && TARGET_64BIT;
      if (use_vec_merge)
        break;

      tmp = gen_reg_rtx (GET_MODE_INNER (mode));
      ix86_expand_vector_extract (false, tmp, target, 1 - elt);
      if (elt == 0)
        tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
      else
        tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
      return;

    case V2DFmode:
      {
        rtx op0, op1;

        /* For the two element vectors, we implement a VEC_CONCAT with
           the extraction of the other element.  */

        tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
        tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);

        if (elt == 0)
          op0 = val, op1 = tmp;
        else
          op0 = tmp, op1 = val;

        tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
        emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
      }
      return;

    case V4SFmode:
      use_vec_merge = TARGET_SSE4_1;
      if (use_vec_merge)
        break;

      switch (elt)
        {
        case 0:
          use_vec_merge = true;
          break;

        case 1:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* target = A A B B */
          emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
          /* target = X A B B */
          ix86_expand_vector_set (false, target, val, 0);
          /* target = A X C D  */
          emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
                                          const1_rtx, const0_rtx,
                                          GEN_INT (2+4), GEN_INT (3+4)));
          return;

        case 2:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B X D */
          emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
                                          const0_rtx, const1_rtx,
                                          GEN_INT (0+4), GEN_INT (3+4)));
          return;

        case 3:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B C X */
          emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
                                          const0_rtx, const1_rtx,
                                          GEN_INT (2+4), GEN_INT (0+4)));
          return;

        default:
          gcc_unreachable ();
        }
      break;

    case V4SImode:
      use_vec_merge = TARGET_SSE4_1;
      if (use_vec_merge)
        break;

      /* Element 0 handled by vec_merge below.  */
      if (elt == 0)
        {
          use_vec_merge = true;
          break;
        }

      if (TARGET_SSE2)
        {
          /* With SSE2, use integer shuffles to swap element 0 and ELT,
             store into element 0, then shuffle them back.  */
          rtx order[4];

          order[0] = GEN_INT (elt);
          order[1] = const1_rtx;
          order[2] = const2_rtx;
          order[3] = GEN_INT (3);
          order[elt] = const0_rtx;

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));

          ix86_expand_vector_set (false, target, val, 0);

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
                                  gen_lowpart (SFmode, val), elt);
        }
      return;

    case V8HImode:
      use_vec_merge = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
      use_vec_merge = TARGET_SSE4_1;
      break;

    case V8QImode:
      break;

    case V32QImode:
      half_mode = V16QImode;
      j = 0;
      n = 16;
      goto half;

    case V16HImode:
      half_mode = V8HImode;
      j = 1;
      n = 8;
      goto half;

    case V8SImode:
      half_mode = V4SImode;
      j = 2;
      n = 4;
      goto half;

    case V4DImode:
      half_mode = V2DImode;
      j = 3;
      n = 2;
      goto half;

    case V8SFmode:
      half_mode = V4SFmode;
      j = 4;
      n = 4;
      goto half;

    case V4DFmode:
      half_mode = V2DFmode;
      j = 5;
      n = 2;
      goto half;

half:
      /* Compute offset.  */
      i = elt / n;
      elt %= n;

      gcc_assert (i <= 1);

      /* Extract the half.  */
      tmp = gen_reg_rtx (half_mode);
      emit_insn (gen_extract[j][i] (tmp, target));

      /* Put val in tmp at elt.  */
      ix86_expand_vector_set (false, tmp, val, elt);

      /* Put it back.  */
      emit_insn (gen_insert[j][i] (target, target, tmp));
      return;

    default:
      break;
    }

  if (use_vec_merge)
    {
      tmp = gen_rtx_VEC_DUPLICATE (mode, val);
      tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

      emit_move_insn (mem, target);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (tmp, val);

      emit_move_insn (target, mem);
    }
}
void
ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_extr = false;
  rtx tmp;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
        break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      use_vec_extr = true;
      break;

    case V4SFmode:
      use_vec_extr = TARGET_SSE4_1;
      if (use_vec_extr)
        break;

      switch (elt)
        {
        case 0:
          tmp = vec;
          break;

        case 1:
        case 3:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
                                          GEN_INT (elt), GEN_INT (elt),
                                          GEN_INT (elt+4), GEN_INT (elt+4)));
          break;

        case 2:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
          break;

        default:
          gcc_unreachable ();
        }
      vec = tmp;
      use_vec_extr = true;
      elt = 0;
      break;

    case V4SImode:
      use_vec_extr = TARGET_SSE4_1;
      if (use_vec_extr)
        break;

      if (TARGET_SSE2)
        {
          switch (elt)
            {
            case 0:
              tmp = vec;
              break;

            case 1:
            case 3:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_sse2_pshufd_1 (tmp, vec,
                                            GEN_INT (elt), GEN_INT (elt),
                                            GEN_INT (elt), GEN_INT (elt)));
              break;

            case 2:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
              break;

            default:
              gcc_unreachable ();
            }
          vec = tmp;
          use_vec_extr = true;
          elt = 0;
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
                                      gen_lowpart (V4SFmode, vec), elt);
          return;
        }
      break;

    case V8HImode:
      use_vec_extr = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
      use_vec_extr = TARGET_SSE4_1;
      break;

    case V8SFmode:
      if (TARGET_AVX)
        {
          tmp = gen_reg_rtx (V4SFmode);
          if (elt < 4)
            emit_insn (gen_vec_extract_lo_v8sf (tmp, vec));
          else
            emit_insn (gen_vec_extract_hi_v8sf (tmp, vec));
          ix86_expand_vector_extract (false, target, tmp, elt & 3);
          return;
        }
      break;

    case V4DFmode:
      if (TARGET_AVX)
        {
          tmp = gen_reg_rtx (V2DFmode);
          if (elt < 2)
            emit_insn (gen_vec_extract_lo_v4df (tmp, vec));
          else
            emit_insn (gen_vec_extract_hi_v4df (tmp, vec));
          ix86_expand_vector_extract (false, target, tmp, elt & 1);
          return;
        }
      break;

    case V32QImode:
      if (TARGET_AVX)
        {
          tmp = gen_reg_rtx (V16QImode);
          if (elt < 16)
            emit_insn (gen_vec_extract_lo_v32qi (tmp, vec));
          else
            emit_insn (gen_vec_extract_hi_v32qi (tmp, vec));
          ix86_expand_vector_extract (false, target, tmp, elt & 15);
          return;
        }
      break;

    case V16HImode:
      if (TARGET_AVX)
        {
          tmp = gen_reg_rtx (V8HImode);
          if (elt < 8)
            emit_insn (gen_vec_extract_lo_v16hi (tmp, vec));
          else
            emit_insn (gen_vec_extract_hi_v16hi (tmp, vec));
          ix86_expand_vector_extract (false, target, tmp, elt & 7);
          return;
        }
      break;

    case V8SImode:
      if (TARGET_AVX)
        {
          tmp = gen_reg_rtx (V4SImode);
          if (elt < 4)
            emit_insn (gen_vec_extract_lo_v8si (tmp, vec));
          else
            emit_insn (gen_vec_extract_hi_v8si (tmp, vec));
          ix86_expand_vector_extract (false, target, tmp, elt & 3);
          return;
        }
      break;

    case V4DImode:
      if (TARGET_AVX)
        {
          tmp = gen_reg_rtx (V2DImode);
          if (elt < 2)
            emit_insn (gen_vec_extract_lo_v4di (tmp, vec));
          else
            emit_insn (gen_vec_extract_hi_v4di (tmp, vec));
          ix86_expand_vector_extract (false, target, tmp, elt & 1);
          return;
        }
      break;

    case V8QImode:
      /* ??? Could extract the appropriate HImode element and shift.  */
    default:
      break;
    }

  if (use_vec_extr)
    {
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
      tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);

      /* Let the rtl optimizers know about the zero extension performed.  */
      if (inner_mode == QImode || inner_mode == HImode)
        {
          tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
          target = gen_lowpart (SImode, target);
        }

      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

      emit_move_insn (mem, vec);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (target, tmp);
    }
}
/* Generate code to copy vector bits i / 2 ... i - 1 from vector SRC
   to bits 0 ... i / 2 - 1 of vector DEST, which has the same mode.
   The upper bits of DEST are undefined, though they shouldn't cause
   exceptions (some bits from src or all zeros are ok).  */

static void
emit_reduc_half (rtx dest, rtx src, int i)
{
  rtx tem;

  switch (GET_MODE (src))
    {
    case V4SFmode:
      if (i == 128)
        tem = gen_sse_movhlps (dest, src, src);
      else
        tem = gen_sse_shufps_v4sf (dest, src, src, const1_rtx, const1_rtx,
                                   GEN_INT (1 + 4), GEN_INT (1 + 4));
      break;
    case V2DFmode:
      tem = gen_vec_interleave_highv2df (dest, src, src);
      break;
    case V16QImode:
    case V8HImode:
    case V4SImode:
    case V2DImode:
      tem = gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, dest),
                                gen_lowpart (V1TImode, src),
                                GEN_INT (i / 2));
      break;
    case V8SFmode:
      if (i == 256)
        tem = gen_avx_vperm2f128v8sf3 (dest, src, src, const1_rtx);
      else
        tem = gen_avx_shufps256 (dest, src, src,
                                 GEN_INT (i == 128 ? 2 + (3 << 2) : 1));
      break;
    case V4DFmode:
      if (i == 256)
        tem = gen_avx_vperm2f128v4df3 (dest, src, src, const1_rtx);
      else
        tem = gen_avx_shufpd256 (dest, src, src, const1_rtx);
      break;
    case V32QImode:
    case V16HImode:
    case V8SImode:
    case V4DImode:
      if (i == 256)
        tem = gen_avx2_permv2ti (gen_lowpart (V4DImode, dest),
                                 gen_lowpart (V4DImode, src),
                                 gen_lowpart (V4DImode, src),
                                 const1_rtx);
      else
        tem = gen_avx2_lshrv2ti3 (gen_lowpart (V2TImode, dest),
                                  gen_lowpart (V2TImode, src),
                                  GEN_INT (i / 2));
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (tem);
}
/* Expand a vector reduction.  FN is the binary pattern to reduce;
   DEST is the destination; IN is the input vector.  */

void
ix86_expand_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
{
  rtx half, dst, vec = in;
  enum machine_mode mode = GET_MODE (in);
  int i;

  /* SSE4 has a special instruction for V8HImode UMIN reduction.  */
  if (TARGET_SSE4_1
      && mode == V8HImode
      && fn == gen_uminv8hi3)
    {
      emit_insn (gen_sse4_1_phminposuw (dest, in));
      return;
    }

  for (i = GET_MODE_BITSIZE (mode);
       i > GET_MODE_BITSIZE (GET_MODE_INNER (mode));
       i >>= 1)
    {
      half = gen_reg_rtx (mode);
      emit_reduc_half (half, vec, i);
      if (i == GET_MODE_BITSIZE (GET_MODE_INNER (mode)) * 2)
        dst = dest;
      else
        dst = gen_reg_rtx (mode);
      emit_insn (fn (dst, half, vec));
      vec = dst;
    }
}
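
/* For example, a V4SFmode reduction with FN == gen_sminv4sf3 iterates
   twice: at i == 128 emit_reduc_half moves the upper 64 bits down and
   FN combines {a,b,c,d} with {c,d,...}; at i == 64 element 1 is moved
   down and combined again, leaving min(a,b,c,d) in element 0 of DEST.
   Only about log2(nelts) combining instructions are emitted.  */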
/* Target hook for scalar_mode_supported_p.  */
static bool
ix86_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (mode == TFmode)
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}
/* Implements target hook vector_mode_supported_p.  */
static bool
ix86_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
    return true;
  if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
    return true;
  if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
    return true;
  if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
    return true;
  if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
    return true;
  return false;
}
/* Target hook for c_mode_for_suffix.  */
static enum machine_mode
ix86_c_mode_for_suffix (char suffix)
{
  if (suffix == 'q')
    return TFmode;
  if (suffix == 'w')
    return XFmode;

  return VOIDmode;
}
/* Worker function for TARGET_MD_ASM_CLOBBERS.

   We do this in the new i386 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
                      tree inputs ATTRIBUTE_UNUSED,
                      tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
                        clobbers);
  clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
                        clobbers);
  return clobbers;
}
/* Implements target vector targetm.asm.encode_section_info.  */

static void ATTRIBUTE_UNUSED
ix86_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == VAR_DECL
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
      && ix86_in_large_data_p (decl))
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
}
/* Worker function for REVERSE_CONDITION.  */

enum rtx_code
ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
{
  return (mode != CCFPmode && mode != CCFPUmode
          ? reverse_condition (code)
          : reverse_condition_maybe_unordered (code));
}
/* Output code to perform an x87 FP register move, from OPERANDS[1]
   to OPERANDS[0].  */

const char *
output_387_reg_move (rtx insn, rtx *operands)
{
  if (REG_P (operands[0]))
    {
      if (REG_P (operands[1])
          && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
        {
          if (REGNO (operands[0]) == FIRST_STACK_REG)
            return output_387_ffreep (operands, 0);
          return "fstp\t%y0";
        }
      if (STACK_TOP_P (operands[0]))
        return "fld%Z1\t%y1";
      return "fst\t%y0";
    }
  else if (MEM_P (operands[0]))
    {
      gcc_assert (REG_P (operands[1]));
      if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
        return "fstp%Z0\t%y0";
      else
        {
          /* There is no non-popping store to memory for XFmode.
             So if we need one, follow the store with a load.  */
          if (GET_MODE (operands[0]) == XFmode)
            return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
          else
            return "fst%Z0\t%y0";
        }
    }
  else
    gcc_unreachable ();
}
/* Output code to perform a conditional jump to LABEL, if C2 flag in
   FP status register is set.  */

void
ix86_emit_fp_unordered_jump (rtx label)
{
  rtx reg = gen_reg_rtx (HImode);
  rtx temp;

  emit_insn (gen_x86_fnstsw_1 (reg));

  if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
    {
      emit_insn (gen_x86_sahf_1 (reg));

      temp = gen_rtx_REG (CCmode, FLAGS_REG);
      temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
    }
  else
    {
      emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));

      temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
    }

  temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
                               gen_rtx_LABEL_REF (VOIDmode, label),
                               pc_rtx);
  temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);

  emit_jump_insn (temp);
  predict_jump (REG_BR_PROB_BASE * 10 / 100);
}
/* Output code to perform a log1p XFmode calculation.  */

void ix86_emit_i387_log1p (rtx op0, rtx op1)
{
  rtx label1 = gen_label_rtx ();
  rtx label2 = gen_label_rtx ();

  rtx tmp = gen_reg_rtx (XFmode);
  rtx tmp2 = gen_reg_rtx (XFmode);
  rtx test;

  emit_insn (gen_absxf2 (tmp, op1));
  test = gen_rtx_GE (VOIDmode, tmp,
    CONST_DOUBLE_FROM_REAL_VALUE (
       REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
       XFmode));
  emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));

  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
  emit_jump (label2);

  emit_label (label1);
  emit_move_insn (tmp, CONST1_RTX (XFmode));
  emit_insn (gen_addxf3 (tmp, op1, tmp));
  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));

  emit_label (label2);
}
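
/* The threshold tested above is 1 - sqrt(2)/2 ~= 0.2928932.  fyl2xp1 is
   only specified for |x| below that bound, where computing log2(1 + x)
   directly avoids the cancellation that explicitly forming 1 + x would
   suffer for tiny x; outside it the code falls back to fyl2x on 1 + x.
   The fldln2 constant converts the base-2 result to a natural log,
   since ln(1 + x) = ln(2) * log2(1 + x).  */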
/* Emit code for round calculation.  */
void ix86_emit_i387_round (rtx op0, rtx op1)
{
  enum machine_mode inmode = GET_MODE (op1);
  enum machine_mode outmode = GET_MODE (op0);
  rtx e1, e2, res, tmp, tmp1, half;
  rtx scratch = gen_reg_rtx (HImode);
  rtx flags = gen_rtx_REG (CCNOmode, FLAGS_REG);
  rtx jump_label = gen_label_rtx ();
  rtx insn;
  rtx (*gen_abs) (rtx, rtx);
  rtx (*gen_neg) (rtx, rtx);

  switch (inmode)
    {
    case SFmode:
      gen_abs = gen_abssf2;
      break;
    case DFmode:
      gen_abs = gen_absdf2;
      break;
    case XFmode:
      gen_abs = gen_absxf2;
      break;
    default:
      gcc_unreachable ();
    }

  switch (outmode)
    {
    case SFmode:
      gen_neg = gen_negsf2;
      break;
    case DFmode:
      gen_neg = gen_negdf2;
      break;
    case XFmode:
      gen_neg = gen_negxf2;
      break;
    case HImode:
      gen_neg = gen_neghi2;
      break;
    case SImode:
      gen_neg = gen_negsi2;
      break;
    case DImode:
      gen_neg = gen_negdi2;
      break;
    default:
      gcc_unreachable ();
    }

  e1 = gen_reg_rtx (inmode);
  e2 = gen_reg_rtx (inmode);
  res = gen_reg_rtx (outmode);

  half = CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, inmode);

  /* round(a) = sgn(a) * floor(fabs(a) + 0.5) */

  /* scratch = fxam(op1) */
  emit_insn (gen_rtx_SET (VOIDmode, scratch,
                          gen_rtx_UNSPEC (HImode, gen_rtvec (1, op1),
                                          UNSPEC_FXAM)));
  /* e1 = fabs(op1) */
  emit_insn (gen_abs (e1, op1));

  /* e2 = e1 + 0.5 */
  half = force_reg (inmode, half);
  emit_insn (gen_rtx_SET (VOIDmode, e2,
                          gen_rtx_PLUS (inmode, e1, half)));

  /* res = floor(e2) */
  if (inmode != XFmode)
    {
      tmp1 = gen_reg_rtx (XFmode);

      emit_insn (gen_rtx_SET (VOIDmode, tmp1,
                              gen_rtx_FLOAT_EXTEND (XFmode, e2)));
    }
  else
    tmp1 = e2;

  switch (outmode)
    {
    case SFmode:
    case DFmode:
      {
        rtx tmp0 = gen_reg_rtx (XFmode);

        emit_insn (gen_frndintxf2_floor (tmp0, tmp1));

        emit_insn (gen_rtx_SET (VOIDmode, res,
                                gen_rtx_UNSPEC (outmode, gen_rtvec (1, tmp0),
                                                UNSPEC_TRUNC_NOOP)));
      }
      break;
    case XFmode:
      emit_insn (gen_frndintxf2_floor (res, tmp1));
      break;
    case HImode:
      emit_insn (gen_lfloorxfhi2 (res, tmp1));
      break;
    case SImode:
      emit_insn (gen_lfloorxfsi2 (res, tmp1));
      break;
    case DImode:
      emit_insn (gen_lfloorxfdi2 (res, tmp1));
      break;
    default:
      gcc_unreachable ();
    }

  /* flags = signbit(a) */
  emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x02)));

  /* if (flags) then res = -res */
  tmp = gen_rtx_IF_THEN_ELSE (VOIDmode,
                              gen_rtx_EQ (VOIDmode, flags, const0_rtx),
                              gen_rtx_LABEL_REF (VOIDmode, jump_label),
                              pc_rtx);
  insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
  predict_jump (REG_BR_PROB_BASE * 50 / 100);
  JUMP_LABEL (insn) = jump_label;

  emit_insn (gen_neg (res, res));

  emit_label (jump_label);
  LABEL_NUSES (jump_label) = 1;

  emit_move_insn (op0, res);
}
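
/* Note on the sign handling above: FXAM stores the operand's sign in
   condition bit C1 of the FPU status word, which after fnstsw sits at
   mask 0x02 of the status word's high byte; the test therefore sets
   the flags from signbit(op1), and the conditional negation restores
   the sign that the fabs at the top of the sequence discarded.  */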
/* Output code to perform a Newton-Raphson approximation of a single precision
   floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm].  */

void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
{
  rtx x0, x1, e0, e1;

  x0 = gen_reg_rtx (mode);
  e0 = gen_reg_rtx (mode);
  e1 = gen_reg_rtx (mode);
  x1 = gen_reg_rtx (mode);

  /* a / b = a * ((rcp(b) + rcp(b)) - (b * rcp(b) * rcp (b))) */

  b = force_reg (mode, b);

  /* x0 = rcp(b) estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
                                          UNSPEC_RCP)));
  /* e0 = x0 * b */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
                          gen_rtx_MULT (mode, x0, b)));

  /* e0 = x0 * e0 */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
                          gen_rtx_MULT (mode, x0, e0)));

  /* e1 = x0 + x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
                          gen_rtx_PLUS (mode, x0, x0)));

  /* x1 = e1 - e0 */
  emit_insn (gen_rtx_SET (VOIDmode, x1,
                          gen_rtx_MINUS (mode, e1, e0)));

  /* res = a * x1 */
  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_MULT (mode, a, x1)));
}
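
/* The sequence above is one Newton-Raphson step for f(x) = 1/x - b.
   Starting from the hardware estimate x0 = rcpss(b), it forms
   x1 = x0*(2 - b*x0) = (x0 + x0) - (b*x0*x0), written so that the two
   halves can issue in parallel.  One step roughly doubles the ~12
   accurate bits of the estimate, approaching single precision.  */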
/* Output code to perform a Newton-Raphson approximation of a
   single precision floating point [reciprocal] square root.  */

void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
                         bool recip)
{
  rtx x0, e0, e1, e2, e3, mthree, mhalf;
  REAL_VALUE_TYPE r;

  x0 = gen_reg_rtx (mode);
  e0 = gen_reg_rtx (mode);
  e1 = gen_reg_rtx (mode);
  e2 = gen_reg_rtx (mode);
  e3 = gen_reg_rtx (mode);

  real_from_integer (&r, VOIDmode, -3, -1, 0);
  mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);

  real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
  mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);

  if (VECTOR_MODE_P (mode))
    {
      mthree = ix86_build_const_vector (mode, true, mthree);
      mhalf = ix86_build_const_vector (mode, true, mhalf);
    }

  /* sqrt(a)  = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
     rsqrt(a) = -0.5     * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */

  a = force_reg (mode, a);

  /* x0 = rsqrt(a) estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
                                          UNSPEC_RSQRT)));

  /* If (a == 0.0) Filter out infinity to prevent NaN for sqrt(0.0).  */
  if (!recip)
    {
      rtx zero, mask;

      zero = gen_reg_rtx (mode);
      mask = gen_reg_rtx (mode);

      zero = force_reg (mode, CONST0_RTX(mode));
      emit_insn (gen_rtx_SET (VOIDmode, mask,
                              gen_rtx_NE (mode, zero, a)));

      emit_insn (gen_rtx_SET (VOIDmode, x0,
                              gen_rtx_AND (mode, x0, mask)));
    }

  /* e0 = x0 * a */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
                          gen_rtx_MULT (mode, x0, a)));
  /* e1 = e0 * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
                          gen_rtx_MULT (mode, e0, x0)));

  /* e2 = e1 - 3. */
  mthree = force_reg (mode, mthree);
  emit_insn (gen_rtx_SET (VOIDmode, e2,
                          gen_rtx_PLUS (mode, e1, mthree)));

  mhalf = force_reg (mode, mhalf);
  if (recip)
    /* e3 = -.5 * x0 */
    emit_insn (gen_rtx_SET (VOIDmode, e3,
                            gen_rtx_MULT (mode, x0, mhalf)));
  else
    /* e3 = -.5 * e0 */
    emit_insn (gen_rtx_SET (VOIDmode, e3,
                            gen_rtx_MULT (mode, e0, mhalf)));
  /* ret = e2 * e3 */
  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_MULT (mode, e2, e3)));
}
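
/* Likewise one Newton-Raphson step for the reciprocal square root:
   with x0 = rsqrtss(a), the iteration x1 = 0.5*x0*(3 - a*x0*x0) is
   computed above in the algebraically equivalent form
   (-0.5*x0) * (a*x0*x0 - 3); for the sqrt case the extra factor of a
   is folded into e0 = a*x0, using a*rsqrt(a) = sqrt(a).  */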
#ifdef TARGET_SOLARIS
/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */

static void
i386_solaris_elf_named_section (const char *name, unsigned int flags,
                                tree decl)
{
  /* With Binutils 2.15, the "@unwind" marker must be specified on
     every occurrence of the ".eh_frame" section, not just the first
     one.  */
  if (TARGET_64BIT
      && strcmp (name, ".eh_frame") == 0)
    {
      fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
               flags & SECTION_WRITE ? "aw" : "a");
      return;
    }

#ifndef USE_GAS
  if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
    {
      solaris_elf_asm_comdat_section (name, flags, decl);
      return;
    }
#endif

  default_elf_asm_named_section (name, flags, decl);
}
#endif /* TARGET_SOLARIS */
/* Return the mangling of TYPE if it is an extended fundamental type.  */

static const char *
ix86_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  switch (TYPE_MODE (type))
    {
    case TFmode:
      /* __float128 is "g".  */
      return "g";
    case XFmode:
      /* "long double" or __float80 is "e".  */
      return "e";
    default:
      return NULL;
    }
}
/* For 32-bit code we can save PIC register setup by using
   __stack_chk_fail_local hidden function instead of calling
   __stack_chk_fail directly.  64-bit code doesn't need to setup any PIC
   register, so it is better to call __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
ix86_stack_protect_fail (void)
{
  return TARGET_64BIT
         ? default_external_stack_protect_fail ()
         : default_hidden_stack_protect_fail ();
}
/* Select a format to encode pointers in exception handling data.  CODE
   is 0 for data, 1 for code labels, 2 for function pointers.  GLOBAL is
   true if the symbol may be affected by dynamic relocations.

   ??? All x86 object file formats are capable of representing this.
   After all, the relocation needed is the same as for the call insn.
   Whether or not a particular assembler allows us to enter such, I
   guess we'll have to see.  */
int
asm_preferred_eh_data_format (int code, int global)
{
  if (flag_pic)
    {
      int type = DW_EH_PE_sdata8;
      if (!TARGET_64BIT
          || ix86_cmodel == CM_SMALL_PIC
          || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
        type = DW_EH_PE_sdata4;
      return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
    }
  if (ix86_cmodel == CM_SMALL
      || (ix86_cmodel == CM_MEDIUM && code))
    return DW_EH_PE_udata4;
  return DW_EH_PE_absptr;
}
/* Expand copysign from SIGN to the positive value ABS_VALUE
   storing in RESULT.  If MASK is non-null, it shall be a mask to mask out
   the sign.  */

static void
ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
{
  enum machine_mode mode = GET_MODE (sign);
  rtx sgn = gen_reg_rtx (mode);
  if (mask == NULL_RTX)
    {
      enum machine_mode vmode;

      if (mode == SFmode)
        vmode = V4SFmode;
      else if (mode == DFmode)
        vmode = V2DFmode;
      else
        vmode = mode;

      mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
      if (!VECTOR_MODE_P (mode))
        {
          /* We need to generate a scalar mode mask in this case.  */
          rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
          tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
          mask = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
        }
    }
  else
    mask = gen_rtx_NOT (mode, mask);

  /* sgn = copysign (0, sign) */
  emit_insn (gen_rtx_SET (VOIDmode, sgn,
                          gen_rtx_AND (mode, mask, sign)));

  /* result = copysign (abs_value, sign) */
  emit_insn (gen_rtx_SET (VOIDmode, result,
                          gen_rtx_IOR (mode, abs_value, sgn)));
}
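
/* In bit terms, the two sets above compute sgn = sign & SIGNBIT and
   result = abs_value | sgn.  When MASK is supplied it is the inverted
   (~SIGNBIT) mask produced by ix86_expand_sse_fabs, hence the extra NOT
   before the AND; when it is NULL a sign-bit mask is built directly.  */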
/* Expand fabs (OP0) and return a new rtx that holds the result.  The
   mask for masking out the sign-bit is stored in *SMASK, if that is
   non-null.  */

static rtx
ix86_expand_sse_fabs (rtx op0, rtx *smask)
{
  enum machine_mode vmode, mode = GET_MODE (op0);
  rtx xa, mask;

  xa = gen_reg_rtx (mode);
  if (mode == SFmode)
    vmode = V4SFmode;
  else if (mode == DFmode)
    vmode = V2DFmode;
  else
    vmode = mode;
  mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
  if (!VECTOR_MODE_P (mode))
    {
      /* We need to generate a scalar mode mask in this case.  */
      rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
      tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
    }
  emit_insn (gen_rtx_SET (VOIDmode, xa,
                          gen_rtx_AND (mode, op0, mask)));

  if (smask)
    *smask = mask;

  return xa;
}
/* Expands a comparison of OP0 with OP1 using comparison code CODE,
   swapping the operands if SWAP_OPERANDS is true.  The expanded
   code is a forward jump to a newly created label in case the
   comparison is true.  The generated label rtx is returned.  */

static rtx
ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
                                  bool swap_operands)
{
  rtx label, tmp;

  if (swap_operands)
    {
      tmp = op0;
      op0 = op1;
      op1 = tmp;
    }

  label = gen_label_rtx ();
  tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
                          gen_rtx_COMPARE (CCFPUmode, op0, op1)));
  tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
  tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                              gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
  tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
  JUMP_LABEL (tmp) = label;

  return label;
}
/* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
   using comparison code CODE.  Operands are swapped for the comparison if
   SWAP_OPERANDS is true.  Returns a rtx for the generated mask.  */

static rtx
ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
                              bool swap_operands)
{
  rtx (*insn)(rtx, rtx, rtx, rtx);
  enum machine_mode mode = GET_MODE (op0);
  rtx mask = gen_reg_rtx (mode);

  if (swap_operands)
    {
      rtx tmp = op0;
      op0 = op1;
      op1 = tmp;
    }

  insn = mode == DFmode ? gen_setcc_df_sse : gen_setcc_sf_sse;

  emit_insn (insn (mask, op0, op1,
                   gen_rtx_fmt_ee (code, mode, op0, op1)));
  return mask;
}
/* Generate and return a rtx of mode MODE for 2**n where n is the number
   of bits of the mantissa of MODE, which must be one of DFmode or SFmode.  */

static rtx
ix86_gen_TWO52 (enum machine_mode mode)
{
  REAL_VALUE_TYPE TWO52r;
  rtx TWO52;

  real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
  TWO52 = const_double_from_real_value (TWO52r, mode);
  TWO52 = force_reg (mode, TWO52);

  return TWO52;
}
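
/* This constant enables the classic add-subtract rounding trick used
   below: for |x| < 2**52 in DFmode, (x + 2**52) - 2**52 computed in
   round-to-nearest leaves exactly the integral part of x, because the
   addition pushes every fraction bit out of the 52-bit mantissa.
   E.g. 3.7 + 2**52 rounds to 2**52 + 4, and the subtraction yields 4.0.  */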
/* Expand SSE sequence for computing lround from OP1 storing
   into OP0.  */

void
ix86_expand_lround (rtx op0, rtx op1)
{
  /* C code for the stuff we're doing below:
       tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
       op0 = (long)tmp;
   */
  enum machine_mode mode = GET_MODE (op1);
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;
  rtx adj;

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);

  /* adj = copysign (0.5, op1) */
  adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
  ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);

  /* adj = op1 + adj */
  adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);

  /* op0 = (imode)adj */
  expand_fix (op0, adj, 0);
}
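
/* Using nextafter (0.5, 0.0) rather than 0.5 keeps values just below
   one half from rounding up: for x equal to the largest double below
   0.5, x + adj is still the largest double below 1.0, so the final
   conversion truncates to 0; adding a full 0.5 would round the sum to
   exactly 1.0 and lround would wrongly return 1.  */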
/* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
   into OP0.  */

void
ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
{
  /* C code for the stuff we're doing below (for do_floor):
        xi = (long)op1;
        xi -= (double)xi > op1 ? 1 : 0;
        return xi;
   */
  enum machine_mode fmode = GET_MODE (op1);
  enum machine_mode imode = GET_MODE (op0);
  rtx ireg, freg, label, tmp;

  /* reg = (long)op1 */
  ireg = gen_reg_rtx (imode);
  expand_fix (ireg, op1, 0);

  /* freg = (double)reg */
  freg = gen_reg_rtx (fmode);
  expand_float (freg, ireg, 0);

  /* ireg = (freg > op1) ? ireg - 1 : ireg */
  label = ix86_expand_sse_compare_and_jump (UNLE,
                                            freg, op1, !do_floor);
  tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
                             ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (ireg, tmp);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (op0, ireg);
}
/* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
   result in OPERAND0.  */

void
ix86_expand_rint (rtx operand0, rtx operand1)
{
  /* C code for the stuff we're doing below:
        xa = fabs (operand1);
        if (!isless (xa, 2**52))
          return operand1;
        xa = xa + 2**52 - 2**52;
        return copysign (xa, operand1);
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx res, xa, label, TWO52, mask;

  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  TWO52 = ix86_gen_TWO52 (mode);
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);

  ix86_sse_copysign_to_positive (res, xa, res, mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  Sequence that works without relying on DImode truncation
   via cvttsd2siq that is only available on 64bit targets.  */

void
ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
{
  /* C code for the stuff we expand below.
        double xa = fabs (x), x2;
        if (!isless (xa, TWO52))
          return x;
        xa = xa + TWO52 - TWO52;
        x2 = copysign (xa, x);
     Compensate.  Floor:
        if (x2 > x)
          x2 -= 1;
     Compensate.  Ceil:
        if (x2 < x)
          x2 += 1;
        return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, TWO52, tmp, label, one, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* xa = xa + TWO52 - TWO52; */
  xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);

  /* xa = copysign (xa, operand1) */
  ix86_sse_copysign_to_positive (xa, xa, res, mask);

  /* generate 1.0 or -1.0 */
  one = force_reg (mode,
                   const_double_from_real_value (do_floor
                                                 ? dconst1 : dconstm1, mode));

  /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
                          gen_rtx_AND (mode, one, tmp)));
  /* We always need to subtract here to preserve signed zero.  */
  tmp = expand_simple_binop (mode, MINUS,
                             xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  */

void
ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
{
  /* C code for the stuff we expand below.
        double xa = fabs (x), x2;
        if (!isless (xa, TWO52))
          return x;
        x2 = (double)(long)x;
     Compensate.  Floor:
        if (x2 > x)
          x2 -= 1;
     Compensate.  Ceil:
        if (x2 < x)
          x2 += 1;
        if (HONOR_SIGNED_ZEROS (mode))
          return copysign (x2, x);
        return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xi, TWO52, tmp, label, one, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* xa = (double)(long)x */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, res, 0);
  expand_float (xa, xi, 0);

  /* generate 1.0 */
  one = force_reg (mode, const_double_from_real_value (dconst1, mode));

  /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
                          gen_rtx_AND (mode, one, tmp)));
  tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
                             xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  if (HONOR_SIGNED_ZEROS (mode))
    ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  Sequence that works without relying on DImode truncation
   via cvttsd2siq that is only available on 64bit targets.  */

void
ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
{
  /* C code for the stuff we expand below.
        double xa = fabs (x), xa2, x2;
        if (!isless (xa, TWO52))
          return x;
     Using the absolute value and copying back sign makes
     -0.0 -> -0.0 correct.
        xa2 = xa + TWO52 - TWO52;
     Compensate.
        dxa = xa2 - xa;
        if (dxa <= -0.5)
          xa2 += 1;
        else if (dxa > 0.5)
          xa2 -= 1;
        x2 = copysign (xa2, x);
        return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* xa2 = xa + TWO52 - TWO52; */
  xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);

  /* dxa = xa2 - xa; */
  dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);

  /* generate 0.5, 1.0 and -0.5 */
  half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
  one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
  mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
                               0, OPTAB_DIRECT);

  /* Compensate.  */
  tmp = gen_reg_rtx (mode);
  /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
                          gen_rtx_AND (mode, one, tmp)));
  xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
  /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
                          gen_rtx_AND (mode, one, tmp)));
  xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);

  /* res = copysign (xa2, operand1) */
  ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */

void
ix86_expand_trunc (rtx operand0, rtx operand1)
{
  /* C code for SSE variant we expand below.
        double xa = fabs (x), x2;
        if (!isless (xa, TWO52))
          return x;
        x2 = (double)(long)x;
        if (HONOR_SIGNED_ZEROS (mode))
          return copysign (x2, x);
        return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xi, TWO52, label, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* x = (double)(long)x */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, res, 0);
  expand_float (res, xi, 0);

  if (HONOR_SIGNED_ZEROS (mode))
    ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */

void
ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
{
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, mask, TWO52, label, one, res, smask, tmp;

  /* C code for SSE variant we expand below.
        double xa = fabs (x), x2;
        if (!isless (xa, TWO52))
          return x;
        xa2 = xa + TWO52 - TWO52;
     Compensate:
        if (xa2 > xa)
          xa2 -= 1.0;
        x2 = copysign (xa2, x);
        return x2;
   */

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &smask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* res = xa + TWO52 - TWO52; */
  tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  /* generate 1.0 */
  one = force_reg (mode, const_double_from_real_value (dconst1, mode));

  /* Compensate: res = xa2 - (res > xa ? 1 : 0)  */
  mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_AND (mode, mask, one)));
  tmp = expand_simple_binop (mode, MINUS,
                             res, mask, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  /* res = copysign (res, operand1) */
  ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */

void
ix86_expand_round (rtx operand0, rtx operand1)
{
  /* C code for the stuff we're doing below:
        double xa = fabs (x);
        if (!isless (xa, TWO52))
          return x;
        xa = (double)(long)(xa + nextafter (0.5, 0.0));
        return copysign (xa, x);
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx res, TWO52, xa, label, xi, half, mask;
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  TWO52 = ix86_gen_TWO52 (mode);
  xa = ix86_expand_sse_fabs (res, &mask);
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);

  /* xa = xa + 0.5 */
  half = force_reg (mode, const_double_from_real_value (pred_half, mode));
  xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);

  /* xa = (double)(int64_t)xa */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, xa, 0);
  expand_float (xa, xi, 0);

  /* res = copysign (xa, operand1) */
  ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE sequence for computing round
   from OP1 storing into OP0 using sse4 round insn.  */

void
ix86_expand_round_sse4 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx e1, e2, res, half;
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;
  rtx (*gen_copysign) (rtx, rtx, rtx);
  rtx (*gen_round) (rtx, rtx, rtx);

  switch (mode)
    {
    case SFmode:
      gen_copysign = gen_copysignsf3;
      gen_round = gen_sse4_1_roundsf2;
      break;
    case DFmode:
      gen_copysign = gen_copysigndf3;
      gen_round = gen_sse4_1_rounddf2;
      break;
    default:
      gcc_unreachable ();
    }

  /* round (a) = trunc (a + copysign (0.5, a)) */

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
  half = const_double_from_real_value (pred_half, mode);

  /* e1 = copysign (0.5, op1) */
  e1 = gen_reg_rtx (mode);
  emit_insn (gen_copysign (e1, half, op1));

  /* e2 = op1 + e1 */
  e2 = expand_simple_binop (mode, PLUS, op1, e1, NULL_RTX, 0, OPTAB_DIRECT);

  /* res = trunc (e2) */
  res = gen_reg_rtx (mode);
  emit_insn (gen_round (res, e2, GEN_INT (ROUND_TRUNC)));

  emit_move_insn (op0, res);
}
/* Table of valid machine attributes.  */
static const struct attribute_spec ix86_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  /* Stdcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "stdcall",   0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Fastcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "fastcall",  0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Thiscall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "thiscall",  0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Cdecl attribute says the callee is a normal C declaration */
  { "cdecl",     0, 0, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Regparm attribute specifies how many integer arguments are to be
     passed in registers.  */
  { "regparm",   1, 1, false, true,  true,  ix86_handle_cconv_attribute,
    true },
  /* Sseregparm attribute says we are using x86_64 calling conventions
     for FP arguments.  */
  { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
    true },
  /* The transactional memory builtins are implicitly regparm or fastcall
     depending on the ABI.  Override the generic do-nothing attribute that
     these builtins were declared with.  */
  { "*tm regparm", 0, 0, false, true, true, ix86_handle_tm_regparm_attribute,
    true },
  /* force_align_arg_pointer says this function realigns the stack at entry.  */
  { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
    false, true,  true, ix86_handle_cconv_attribute, false },
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
  { "dllimport", 0, 0, false, false, false, handle_dll_attribute, false },
  { "dllexport", 0, 0, false, false, false, handle_dll_attribute, false },
  { "shared",    0, 0, true,  false, false, ix86_handle_shared_attribute,
    false },
#endif
  { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  /* ms_abi and sysv_abi calling convention function attributes.  */
  { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
  { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
  { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute,
    false },
  { "callee_pop_aggregate_return", 1, 1, false, true, true,
    ix86_handle_callee_pop_aggregate_return, true },
  /* End element.  */
  { NULL, 0, 0, false, false, false, NULL, false }
};
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                 tree vectype,
                                 int misalign ATTRIBUTE_UNUSED)
{
  unsigned elements;

  switch (type_of_cost)
    {
      case scalar_stmt:
        return ix86_cost->scalar_stmt_cost;

      case scalar_load:
        return ix86_cost->scalar_load_cost;

      case scalar_store:
        return ix86_cost->scalar_store_cost;

      case vector_stmt:
        return ix86_cost->vec_stmt_cost;

      case vector_load:
        return ix86_cost->vec_align_load_cost;

      case vector_store:
        return ix86_cost->vec_store_cost;

      case vec_to_scalar:
        return ix86_cost->vec_to_scalar_cost;

      case scalar_to_vec:
        return ix86_cost->scalar_to_vec_cost;

      case unaligned_load:
      case unaligned_store:
        return ix86_cost->vec_unalign_load_cost;

      case cond_branch_taken:
        return ix86_cost->cond_taken_branch_cost;

      case cond_branch_not_taken:
        return ix86_cost->cond_not_taken_branch_cost;

      case vec_perm:
      case vec_promote_demote:
        return ix86_cost->vec_stmt_cost;

      case vec_construct:
        elements = TYPE_VECTOR_SUBPARTS (vectype);
        return elements / 2 + 1;

      default:
        gcc_unreachable ();
    }
}
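
/* With this formula a vec_construct of e.g. a V4SF vector costs
   4 / 2 + 1 = 3, roughly one instruction per pair of element inserts
   plus one to combine the halves, which matches the interleaving
   initialization sequences emitted earlier in this file.  */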
/* A cached (set (nil) (vselect (vconcat (nil) (nil)) (parallel [])))
   insn, so that expand_vselect{,_vconcat} doesn't have to create a fresh
   insn every time.  */

static GTY(()) rtx vselect_insn;

/* Initialize vselect_insn.  */

static void
init_vselect_insn (void)
{
  unsigned i;
  rtx x;

  x = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (MAX_VECT_LEN));
  for (i = 0; i < MAX_VECT_LEN; ++i)
    XVECEXP (x, 0, i) = const0_rtx;
  x = gen_rtx_VEC_SELECT (V2DFmode, gen_rtx_VEC_CONCAT (V4DFmode, const0_rtx,
                                                        const0_rtx), x);
  x = gen_rtx_SET (VOIDmode, const0_rtx, x);
  start_sequence ();
  vselect_insn = emit_insn (x);
  end_sequence ();
}
/* Construct (set target (vec_select op0 (parallel perm))) and
   return true if that's a valid instruction in the active ISA.  */

static bool
expand_vselect (rtx target, rtx op0, const unsigned char *perm,
                unsigned nelt, bool testing_p)
{
  unsigned int i;
  rtx x, save_vconcat;
  int icode;

  if (vselect_insn == NULL_RTX)
    init_vselect_insn ();

  x = XEXP (SET_SRC (PATTERN (vselect_insn)), 1);
  PUT_NUM_ELEM (XVEC (x, 0), nelt);
  for (i = 0; i < nelt; ++i)
    XVECEXP (x, 0, i) = GEN_INT (perm[i]);
  save_vconcat = XEXP (SET_SRC (PATTERN (vselect_insn)), 0);
  XEXP (SET_SRC (PATTERN (vselect_insn)), 0) = op0;
  PUT_MODE (SET_SRC (PATTERN (vselect_insn)), GET_MODE (target));
  SET_DEST (PATTERN (vselect_insn)) = target;
  icode = recog_memoized (vselect_insn);

  if (icode >= 0 && !testing_p)
    emit_insn (copy_rtx (PATTERN (vselect_insn)));

  SET_DEST (PATTERN (vselect_insn)) = const0_rtx;
  XEXP (SET_SRC (PATTERN (vselect_insn)), 0) = save_vconcat;
  INSN_CODE (vselect_insn) = -1;

  return icode >= 0;
}
/* Similar, but generate a vec_concat from op0 and op1 as well.  */

static bool
expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
                        const unsigned char *perm, unsigned nelt,
                        bool testing_p)
{
  enum machine_mode v2mode;
  rtx x;
  bool ok;

  if (vselect_insn == NULL_RTX)
    init_vselect_insn ();

  v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
  x = XEXP (SET_SRC (PATTERN (vselect_insn)), 0);
  PUT_MODE (x, v2mode);
  XEXP (x, 0) = op0;
  XEXP (x, 1) = op1;
  ok = expand_vselect (target, x, perm, nelt, testing_p);
  XEXP (x, 0) = const0_rtx;
  XEXP (x, 1) = const0_rtx;
  return ok;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of blendp[sd] / pblendw / pblendvb / vpblendd.  */

static bool
expand_vec_perm_blend (struct expand_vec_perm_d *d)
{
  enum machine_mode vmode = d->vmode;
  unsigned i, mask, nelt = d->nelt;
  rtx target, op0, op1, x;
  rtx rperm[32], vperm;

  if (d->one_operand_p)
    return false;
  if (TARGET_AVX2 && GET_MODE_SIZE (vmode) == 32)
    ;
  else if (TARGET_AVX && (vmode == V4DFmode || vmode == V8SFmode))
    ;
  else if (TARGET_SSE4_1 && GET_MODE_SIZE (vmode) == 16)
    ;
  else
    return false;

  /* This is a blend, not a permute.  Elements must stay in their
     respective lanes.  */
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = d->perm[i];
      if (!(e == i || e == i + nelt))
        return false;
    }

  if (d->testing_p)
    return true;

  /* ??? Without SSE4.1, we could implement this with and/andn/or.  This
     decision should be extracted elsewhere, so that we only try that
     sequence once all budget==3 options have been tried.  */
  target = d->target;
  op0 = d->op0;
  op1 = d->op1;
  mask = 0;

  switch (vmode)
    {
    case V4DFmode:
    case V8SFmode:
    case V2DFmode:
    case V4SFmode:
    case V8HImode:
    case V8SImode:
      for (i = 0; i < nelt; ++i)
        mask |= (d->perm[i] >= nelt) << i;
      break;

    case V2DImode:
      for (i = 0; i < 2; ++i)
        mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
      vmode = V8HImode;
      goto do_subreg;

    case V4SImode:
      for (i = 0; i < 4; ++i)
        mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
      vmode = V8HImode;
      goto do_subreg;

    case V16QImode:
      /* See if bytes move in pairs so we can use pblendw with
         an immediate argument, rather than pblendvb with a vector
         argument.  */
      for (i = 0; i < 16; i += 2)
        if (d->perm[i] + 1 != d->perm[i + 1])
          {
          use_pblendvb:
            for (i = 0; i < nelt; ++i)
              rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);

          finish_pblendvb:
            vperm = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
            vperm = force_reg (vmode, vperm);

            if (GET_MODE_SIZE (vmode) == 16)
              emit_insn (gen_sse4_1_pblendvb (target, op0, op1, vperm));
            else
              emit_insn (gen_avx2_pblendvb (target, op0, op1, vperm));
            return true;
          }

      for (i = 0; i < 8; ++i)
        mask |= (d->perm[i * 2] >= 16) << i;
      vmode = V8HImode;
      /* FALLTHRU */

    do_subreg:
      target = gen_lowpart (vmode, target);
      op0 = gen_lowpart (vmode, op0);
      op1 = gen_lowpart (vmode, op1);
      break;

    case V32QImode:
      /* See if bytes move in pairs.  If not, vpblendvb must be used.  */
      for (i = 0; i < 32; i += 2)
        if (d->perm[i] + 1 != d->perm[i + 1])
          goto use_pblendvb;
      /* See if bytes move in quadruplets.  If yes, vpblendd
         with immediate can be used.  */
      for (i = 0; i < 32; i += 4)
        if (d->perm[i] + 2 != d->perm[i + 2])
          break;
      if (i < 32)
        {
          /* See if bytes move the same in both lanes.  If yes,
             vpblendw with immediate can be used.  */
          for (i = 0; i < 16; i += 2)
            if (d->perm[i] + 16 != d->perm[i + 16])
              goto use_pblendvb;

          /* Use vpblendw.  */
          for (i = 0; i < 16; ++i)
            mask |= (d->perm[i * 2] >= 32) << i;
          vmode = V16HImode;
          goto do_subreg;
        }

      /* Use vpblendd.  */
      for (i = 0; i < 8; ++i)
        mask |= (d->perm[i * 4] >= 32) << i;
      vmode = V8SImode;
      goto do_subreg;

    case V16HImode:
      /* See if words move in pairs.  If yes, vpblendd can be used.  */
      for (i = 0; i < 16; i += 2)
        if (d->perm[i] + 1 != d->perm[i + 1])
          break;
      if (i < 16)
        {
          /* See if words move the same in both lanes.  If not,
             vpblendvb must be used.  */
          for (i = 0; i < 8; i++)
            if (d->perm[i] + 8 != d->perm[i + 8])
              {
                /* Use vpblendvb.  */
                for (i = 0; i < 32; ++i)
                  rperm[i] = (d->perm[i / 2] < 16 ? const0_rtx : constm1_rtx);

                vmode = V32QImode;
                nelt = 32;
                target = gen_lowpart (vmode, target);
                op0 = gen_lowpart (vmode, op0);
                op1 = gen_lowpart (vmode, op1);
                goto finish_pblendvb;
              }

          /* Use vpblendw.  */
          for (i = 0; i < 16; ++i)
            mask |= (d->perm[i] >= 16) << i;
          break;
        }

      /* Use vpblendd.  */
      for (i = 0; i < 8; ++i)
        mask |= (d->perm[i * 2] >= 16) << i;
      vmode = V8SImode;
      goto do_subreg;

    case V4DImode:
      /* Use vpblendd.  */
      for (i = 0; i < 4; ++i)
        mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
      vmode = V8SImode;
      goto do_subreg;

    default:
      gcc_unreachable ();
    }

  /* This matches five different patterns with the different modes.  */
  x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
  x = gen_rtx_SET (VOIDmode, target, x);
  emit_insn (x);

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of the variable form of vpermilps.

   Note that we will have already failed the immediate input vpermilps,
   which requires that the high and low part shuffle be identical; the
   variable form doesn't require that.  */

static bool
expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
{
  rtx rperm[8], vperm;
  unsigned i;

  if (!TARGET_AVX || d->vmode != V8SFmode || !d->one_operand_p)
    return false;

  /* We can only permute within the 128-bit lane.  */
  for (i = 0; i < 8; ++i)
    {
      unsigned e = d->perm[i];
      if (i < 4 ? e >= 4 : e < 4)
        return false;
    }

  if (d->testing_p)
    return true;

  for (i = 0; i < 8; ++i)
    {
      unsigned e = d->perm[i];

      /* Within each 128-bit lane, the elements of op0 are numbered
         from 0 and the elements of op1 are numbered from 4.  */
      if (e >= 8 + 4)
        e -= (8 + 4);
      else if (e >= 4)
        e -= 4;

      rperm[i] = GEN_INT (e);
    }

  vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
  vperm = force_reg (V8SImode, vperm);
  emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));

  return true;
}
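
/* Illustrative example (added commentary): d->perm = { 3 2 1 0 7 6 5 4 }
   reverses the elements within each 128-bit lane of a V8SFmode operand
   and passes the intra-lane check above, whereas { 4 5 6 7 0 1 2 3 }
   swaps the lanes and fails it, so variable vpermilps cannot be used
   for the latter.  */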
/* Return true if permutation D can be performed as VMODE permutation
   instead.  */

static bool
valid_perm_using_mode_p (enum machine_mode vmode, struct expand_vec_perm_d *d)
{
  unsigned int i, j, chunk;

  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT
      || GET_MODE_CLASS (d->vmode) != MODE_VECTOR_INT
      || GET_MODE_SIZE (vmode) != GET_MODE_SIZE (d->vmode))
    return false;

  if (GET_MODE_NUNITS (vmode) >= d->nelt)
    return true;

  chunk = d->nelt / GET_MODE_NUNITS (vmode);
  for (i = 0; i < d->nelt; i += chunk)
    if (d->perm[i] & (chunk - 1))
      return false;
    else
      for (j = 1; j < chunk; ++j)
        if (d->perm[i] + j != d->perm[i + j])
          return false;

  return true;
}
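
/* Illustrative example (added commentary): a V16QImode permutation
   { 4 5 6 7 0 1 2 3 12 13 14 15 8 9 10 11 } moves aligned groups of
   chunk == 4 consecutive bytes, so it is also expressible as the
   V4SImode permutation { 1 0 3 2 } and the function returns true
   for vmode == V4SImode.  */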
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of pshufb, vpperm, vpermq, vpermd, vpermps or vperm2i128.  */

static bool
expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
{
  unsigned i, nelt, eltsz, mask;
  unsigned char perm[32];
  enum machine_mode vmode = V16QImode;
  rtx rperm[32], vperm, target, op0, op1;

  nelt = d->nelt;

  if (!d->one_operand_p)
    {
      if (!TARGET_XOP || GET_MODE_SIZE (d->vmode) != 16)
        {
          if (TARGET_AVX2
              && valid_perm_using_mode_p (V2TImode, d))
            {
              if (d->testing_p)
                return true;

              /* Use vperm2i128 insn.  The pattern uses
                 V4DImode instead of V2TImode.  */
              target = gen_lowpart (V4DImode, d->target);
              op0 = gen_lowpart (V4DImode, d->op0);
              op1 = gen_lowpart (V4DImode, d->op1);
              rperm[0]
                = GEN_INT ((d->perm[0] / (nelt / 2))
                           | ((d->perm[nelt / 2] / (nelt / 2)) << 4));
              emit_insn (gen_avx2_permv2ti (target, op0, op1, rperm[0]));
              return true;
            }
          return false;
        }
    }
  else
    {
      if (GET_MODE_SIZE (d->vmode) == 16)
        {
          if (!TARGET_SSSE3)
            return false;
        }
      else if (GET_MODE_SIZE (d->vmode) == 32)
        {
          if (!TARGET_AVX2)
            return false;

          /* V4DImode should be already handled through
             expand_vselect by vpermq instruction.  */
          gcc_assert (d->vmode != V4DImode);

          vmode = V32QImode;
          if (d->vmode == V8SImode
              || d->vmode == V16HImode
              || d->vmode == V32QImode)
            {
              /* First see if vpermq can be used for
                 V8SImode/V16HImode/V32QImode.  */
              if (valid_perm_using_mode_p (V4DImode, d))
                {
                  for (i = 0; i < 4; i++)
                    perm[i] = (d->perm[i * nelt / 4] * 4 / nelt) & 3;
                  return expand_vselect (gen_lowpart (V4DImode, d->target),
                                         gen_lowpart (V4DImode, d->op0),
                                         perm, 4, d->testing_p);
                }

              /* Next see if vpermd can be used.  */
              if (valid_perm_using_mode_p (V8SImode, d))
                vmode = V8SImode;
            }
          /* Or if vpermps can be used.  */
          else if (d->vmode == V8SFmode)
            vmode = V8SImode;

          if (vmode == V32QImode)
            {
              /* vpshufb only works intra lanes, it is not
                 possible to shuffle bytes in between the lanes.  */
              for (i = 0; i < nelt; ++i)
                if ((d->perm[i] ^ i) & (nelt / 2))
                  return false;
            }
        }
      else
        return false;
    }

  if (d->testing_p)
    return true;

  if (vmode == V8SImode)
    for (i = 0; i < 8; ++i)
      rperm[i] = GEN_INT ((d->perm[i * nelt / 8] * 8 / nelt) & 7);
  else
    {
      eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
      if (!d->one_operand_p)
        mask = 2 * nelt - 1;
      else if (vmode == V16QImode)
        mask = nelt - 1;
      else
        mask = nelt / 2 - 1;

      for (i = 0; i < nelt; ++i)
        {
          unsigned j, e = d->perm[i] & mask;
          for (j = 0; j < eltsz; ++j)
            rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
        }
    }

  vperm = gen_rtx_CONST_VECTOR (vmode,
                                gen_rtvec_v (GET_MODE_NUNITS (vmode), rperm));
  vperm = force_reg (vmode, vperm);

  target = gen_lowpart (vmode, d->target);
  op0 = gen_lowpart (vmode, d->op0);
  if (d->one_operand_p)
    {
      if (vmode == V16QImode)
        emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
      else if (vmode == V32QImode)
        emit_insn (gen_avx2_pshufbv32qi3 (target, op0, vperm));
      else if (vmode == V8SFmode)
        emit_insn (gen_avx2_permvarv8sf (target, op0, vperm));
      else
        emit_insn (gen_avx2_permvarv8si (target, op0, vperm));
    }
  else
    {
      op1 = gen_lowpart (vmode, d->op1);
      emit_insn (gen_xop_pperm (target, op0, op1, vperm));
    }

  return true;
}
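
/* Illustrative example (added commentary): in the byte-level mask built
   above, eltsz expands each element index to its bytes, so for a one
   operand V8HImode shuffle (eltsz == 2) an element e selected into
   position i becomes rperm[2*i] = 2*e and rperm[2*i+1] = 2*e + 1;
   e.g. perm[0] == 3 yields the bytes { 6, 7 } at the start of the
   pshufb control vector.  */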
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to instantiate D
   in a single instruction.  */

static bool
expand_vec_perm_1 (struct expand_vec_perm_d *d)
{
  unsigned i, nelt = d->nelt;
  unsigned char perm2[MAX_VECT_LEN];

  /* Check plain VEC_SELECT first, because AVX has instructions that could
     match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
     input where SEL+CONCAT may not.  */
  if (d->one_operand_p)
    {
      int mask = nelt - 1;
      bool identity_perm = true;
      bool broadcast_perm = true;

      for (i = 0; i < nelt; i++)
        {
          perm2[i] = d->perm[i] & mask;
          if (perm2[i] != i)
            identity_perm = false;
          if (perm2[i])
            broadcast_perm = false;
        }

      if (identity_perm)
        {
          if (!d->testing_p)
            emit_move_insn (d->target, d->op0);
          return true;
        }
      else if (broadcast_perm && TARGET_AVX2)
        {
          /* Use vpbroadcast{b,w,d}.  */
          rtx (*gen) (rtx, rtx) = NULL;
          switch (d->vmode)
            {
            case V32QImode:
              gen = gen_avx2_pbroadcastv32qi_1;
              break;
            case V16HImode:
              gen = gen_avx2_pbroadcastv16hi_1;
              break;
            case V8SImode:
              gen = gen_avx2_pbroadcastv8si_1;
              break;
            case V16QImode:
              gen = gen_avx2_pbroadcastv16qi;
              break;
            case V8HImode:
              gen = gen_avx2_pbroadcastv8hi;
              break;
            case V8SFmode:
              gen = gen_avx2_vec_dupv8sf_1;
              break;
            /* For other modes prefer other shuffles this function creates.  */
            default:
              break;
            }
          if (gen != NULL)
            {
              if (!d->testing_p)
                emit_insn (gen (d->target, d->op0));
              return true;
            }
        }

      if (expand_vselect (d->target, d->op0, perm2, nelt, d->testing_p))
        return true;

      /* There are plenty of patterns in sse.md that are written for
         SEL+CONCAT and are not replicated for a single op.  Perhaps
         that should be changed, to avoid the nastiness here.  */

      /* Recognize interleave style patterns, which means incrementing
         every other permutation operand.  */
      for (i = 0; i < nelt; i += 2)
        {
          perm2[i] = d->perm[i] & mask;
          perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
        }
      if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt,
                                  d->testing_p))
        return true;

      /* Recognize shufps, which means adding {0, 0, nelt, nelt}.  */
      if (nelt >= 4)
        {
          for (i = 0; i < nelt; i += 4)
            {
              perm2[i + 0] = d->perm[i + 0] & mask;
              perm2[i + 1] = d->perm[i + 1] & mask;
              perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
              perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
            }

          if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt,
                                      d->testing_p))
            return true;
        }
    }

  /* Finally, try the fully general two operand permute.  */
  if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt,
                              d->testing_p))
    return true;

  /* Recognize interleave style patterns with reversed operands.  */
  if (!d->one_operand_p)
    {
      for (i = 0; i < nelt; ++i)
        {
          unsigned e = d->perm[i];
          if (e >= nelt)
            e -= nelt;
          else
            e += nelt;
          perm2[i] = e;
        }

      if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt,
                                  d->testing_p))
        return true;
    }

  /* Try the SSE4.1 blend variable merge instructions.  */
  if (expand_vec_perm_blend (d))
    return true;

  /* Try one of the AVX vpermil variable permutations.  */
  if (expand_vec_perm_vpermil (d))
    return true;

  /* Try the SSSE3 pshufb or XOP vpperm or AVX2 vperm2i128,
     vpshufb, vpermd, vpermps or vpermq variable permutation.  */
  if (expand_vec_perm_pshufb (d))
    return true;

  return false;
}
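
/* Illustrative example (added commentary): for the interleave
   recognition above, the one operand V4SImode permutation { 0 0 1 1 }
   is rewritten as perm2 = { 0 4 1 5 } on the concatenation of op0 with
   itself, which expand_vselect_vconcat can match as punpckldq.  */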
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement D
   in terms of a pair of pshuflw + pshufhw instructions.  */

static bool
expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
{
  unsigned char perm2[MAX_VECT_LEN];
  unsigned i;
  bool ok;

  if (d->vmode != V8HImode || !d->one_operand_p)
    return false;

  /* The two permutations only operate in 64-bit lanes.  */
  for (i = 0; i < 4; ++i)
    if (d->perm[i] >= 4)
      return false;
  for (i = 4; i < 8; ++i)
    if (d->perm[i] < 4)
      return false;

  if (d->testing_p)
    return true;

  /* Emit the pshuflw.  */
  memcpy (perm2, d->perm, 4);
  for (i = 4; i < 8; ++i)
    perm2[i] = i;
  ok = expand_vselect (d->target, d->op0, perm2, 8, d->testing_p);
  gcc_assert (ok);

  /* Emit the pshufhw.  */
  memcpy (perm2 + 4, d->perm + 4, 4);
  for (i = 0; i < 4; ++i)
    perm2[i] = i;
  ok = expand_vselect (d->target, d->target, perm2, 8, d->testing_p);
  gcc_assert (ok);

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   the permutation using the SSSE3 palignr instruction.  This succeeds
   when all of the elements in PERM fit within one vector and we merely
   need to shift them down so that a single vector permutation has a
   chance to succeed.  */

static bool
expand_vec_perm_palignr (struct expand_vec_perm_d *d)
{
  unsigned i, nelt = d->nelt;
  unsigned min, max;
  bool in_order, ok;
  rtx shift;

  /* Even with AVX, palignr only operates on 128-bit vectors.  */
  if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
    return false;

  min = nelt, max = 0;
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = d->perm[i];
      if (e < min)
        min = e;
      if (e > max)
        max = e;
    }
  if (min == 0 || max - min >= nelt)
    return false;

  /* Given that we have SSSE3, we know we'll be able to implement the
     single operand permutation after the palignr with pshufb.  */
  if (d->testing_p)
    return true;

  shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
  emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
                                  gen_lowpart (TImode, d->op1),
                                  gen_lowpart (TImode, d->op0), shift));

  d->op0 = d->op1 = d->target;
  d->one_operand_p = true;

  in_order = true;
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = d->perm[i] - min;
      if (e != i)
        in_order = false;
      d->perm[i] = e;
    }

  /* Test for the degenerate case where the alignment by itself
     produces the desired permutation.  */
  if (in_order)
    return true;

  ok = expand_vec_perm_1 (d);
  gcc_assert (ok);

  return ok;
}
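
/* Illustrative example (added commentary): for V4SImode with
   d->perm = { 2 3 4 5 }, min == 2 and max == 5, so the palignr above
   shifts the double vector (op1:op0) down by 2 * 32 bits; the residual
   permutation becomes the identity { 0 1 2 3 } and the degenerate-case
   test succeeds without a second shuffle.  */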
static bool expand_vec_perm_interleave3 (struct expand_vec_perm_d *d);

/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   a two vector permutation into a single vector permutation by using
   an interleave operation to merge the vectors.  */

static bool
expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dremap, dfinal;
  unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
  unsigned HOST_WIDE_INT contents;
  unsigned char remap[2 * MAX_VECT_LEN];
  rtx seq;
  bool ok, same_halves = false;

  if (GET_MODE_SIZE (d->vmode) == 16)
    {
      if (d->one_operand_p)
        return false;
    }
  else if (GET_MODE_SIZE (d->vmode) == 32)
    {
      if (!TARGET_AVX)
        return false;
      /* For 32-byte modes allow even d->one_operand_p.
         The lack of cross-lane shuffling in some instructions
         might prevent a single insn shuffle.  */
      dfinal = *d;
      dfinal.testing_p = true;
      /* If expand_vec_perm_interleave3 can expand this into
         a 3 insn sequence, give up and let it be expanded as
         3 insn sequence.  While that is one insn longer,
         it doesn't need a memory operand and in the common
         case that both interleave low and high permutations
         with the same operands are adjacent needs 4 insns
         for both after CSE.  */
      if (expand_vec_perm_interleave3 (&dfinal))
        return false;
    }
  else
    return false;

  /* Examine from whence the elements come.  */
  contents = 0;
  for (i = 0; i < nelt; ++i)
    contents |= ((unsigned HOST_WIDE_INT) 1) << d->perm[i];

  memset (remap, 0xff, sizeof (remap));
  dremap = *d;

  if (GET_MODE_SIZE (d->vmode) == 16)
    {
      unsigned HOST_WIDE_INT h1, h2, h3, h4;

      /* Split the two input vectors into 4 halves.  */
      h1 = (((unsigned HOST_WIDE_INT) 1) << nelt2) - 1;
      h2 = h1 << nelt2;
      h3 = h2 << nelt2;
      h4 = h3 << nelt2;

      /* If the elements from the low halves use interleave low, and similarly
         for interleave high.  If the elements are from mis-matched halves, we
         can use shufps for V4SF/V4SI or do a DImode shuffle.  */
      if ((contents & (h1 | h3)) == contents)
        {
          /* punpckl* */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i] = i * 2;
              remap[i + nelt] = i * 2 + 1;
              dremap.perm[i * 2] = i;
              dremap.perm[i * 2 + 1] = i + nelt;
            }
          if (!TARGET_SSE2 && d->vmode == V4SImode)
            dremap.vmode = V4SFmode;
        }
      else if ((contents & (h2 | h4)) == contents)
        {
          /* punpckh* */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i + nelt2] = i * 2;
              remap[i + nelt + nelt2] = i * 2 + 1;
              dremap.perm[i * 2] = i + nelt2;
              dremap.perm[i * 2 + 1] = i + nelt + nelt2;
            }
          if (!TARGET_SSE2 && d->vmode == V4SImode)
            dremap.vmode = V4SFmode;
        }
      else if ((contents & (h1 | h4)) == contents)
        {
          /* shufps */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i] = i;
              remap[i + nelt + nelt2] = i + nelt2;
              dremap.perm[i] = i;
              dremap.perm[i + nelt2] = i + nelt + nelt2;
            }
          if (nelt != 4)
            {
              /* shufpd */
              dremap.vmode = V2DImode;
              dremap.nelt = 2;
              dremap.perm[0] = 0;
              dremap.perm[1] = 3;
            }
        }
      else if ((contents & (h2 | h3)) == contents)
        {
          /* shufps */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i + nelt2] = i;
              remap[i + nelt] = i + nelt2;
              dremap.perm[i] = i + nelt2;
              dremap.perm[i + nelt2] = i + nelt;
            }
          if (nelt != 4)
            {
              /* shufpd */
              dremap.vmode = V2DImode;
              dremap.nelt = 2;
              dremap.perm[0] = 1;
              dremap.perm[1] = 2;
            }
        }
      else
        return false;
    }
  else
    {
      unsigned int nelt4 = nelt / 4, nzcnt = 0;
      unsigned HOST_WIDE_INT q[8];
      unsigned int nonzero_halves[4];

      /* Split the two input vectors into 8 quarters.  */
      q[0] = (((unsigned HOST_WIDE_INT) 1) << nelt4) - 1;
      for (i = 1; i < 8; ++i)
        q[i] = q[0] << (nelt4 * i);
      for (i = 0; i < 4; ++i)
        if (((q[2 * i] | q[2 * i + 1]) & contents) != 0)
          {
            nonzero_halves[nzcnt] = i;
            ++nzcnt;
          }

      if (nzcnt == 1)
        {
          gcc_assert (d->one_operand_p);
          nonzero_halves[1] = nonzero_halves[0];
          same_halves = true;
        }
      else if (d->one_operand_p)
        {
          gcc_assert (nonzero_halves[0] == 0);
          gcc_assert (nonzero_halves[1] == 1);
        }

      if (nzcnt <= 2)
        {
          if (d->perm[0] / nelt2 == nonzero_halves[1])
            {
              /* Attempt to increase the likelihood that dfinal
                 shuffle will be intra-lane.  */
              char tmph = nonzero_halves[0];
              nonzero_halves[0] = nonzero_halves[1];
              nonzero_halves[1] = tmph;
            }

          /* vperm2f128 or vperm2i128.  */
          for (i = 0; i < nelt2; ++i)
            {
              remap[i + nonzero_halves[1] * nelt2] = i + nelt2;
              remap[i + nonzero_halves[0] * nelt2] = i;
              dremap.perm[i + nelt2] = i + nonzero_halves[1] * nelt2;
              dremap.perm[i] = i + nonzero_halves[0] * nelt2;
            }

          if (d->vmode != V8SFmode
              && d->vmode != V4DFmode
              && d->vmode != V8SImode)
            {
              dremap.vmode = V8SImode;
              dremap.nelt = 8;
              for (i = 0; i < 4; ++i)
                {
                  dremap.perm[i] = i + nonzero_halves[0] * 4;
                  dremap.perm[i + 4] = i + nonzero_halves[1] * 4;
                }
            }
        }
      else if (d->one_operand_p)
        return false;
      else if (TARGET_AVX2
               && (contents & (q[0] | q[2] | q[4] | q[6])) == contents)
        {
          /* vpunpckl* */
          for (i = 0; i < nelt4; ++i)
            {
              remap[i] = i * 2;
              remap[i + nelt] = i * 2 + 1;
              remap[i + nelt2] = i * 2 + nelt2;
              remap[i + nelt + nelt2] = i * 2 + nelt2 + 1;
              dremap.perm[i * 2] = i;
              dremap.perm[i * 2 + 1] = i + nelt;
              dremap.perm[i * 2 + nelt2] = i + nelt2;
              dremap.perm[i * 2 + nelt2 + 1] = i + nelt + nelt2;
            }
        }
      else if (TARGET_AVX2
               && (contents & (q[1] | q[3] | q[5] | q[7])) == contents)
        {
          /* vpunpckh* */
          for (i = 0; i < nelt4; ++i)
            {
              remap[i + nelt4] = i * 2;
              remap[i + nelt + nelt4] = i * 2 + 1;
              remap[i + nelt2 + nelt4] = i * 2 + nelt2;
              remap[i + nelt + nelt2 + nelt4] = i * 2 + nelt2 + 1;
              dremap.perm[i * 2] = i + nelt4;
              dremap.perm[i * 2 + 1] = i + nelt + nelt4;
              dremap.perm[i * 2 + nelt2] = i + nelt2 + nelt4;
              dremap.perm[i * 2 + nelt2 + 1] = i + nelt + nelt2 + nelt4;
            }
        }
      else
        return false;
    }

  /* Use the remapping array set up above to move the elements from their
     swizzled locations into their final destinations.  */
  dfinal = *d;
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = remap[d->perm[i]];
      gcc_assert (e < nelt);
      /* If same_halves is true, both halves of the remapped vector are the
         same.  Avoid cross-lane accesses if possible.  */
      if (same_halves && i >= nelt2)
        {
          gcc_assert (e < nelt2);
          dfinal.perm[i] = e + nelt2;
        }
      else
        dfinal.perm[i] = e;
    }
  dfinal.op0 = gen_reg_rtx (dfinal.vmode);
  dfinal.op1 = dfinal.op0;
  dfinal.one_operand_p = true;
  dremap.target = dfinal.op0;

  /* Test if the final remap can be done with a single insn.  For V4SFmode or
     V4SImode this *will* succeed.  For V8HImode or V16QImode it may not.  */
  start_sequence ();
  ok = expand_vec_perm_1 (&dfinal);
  seq = get_insns ();
  end_sequence ();

  if (!ok)
    return false;

  if (d->testing_p)
    return true;

  if (dremap.vmode != dfinal.vmode)
    {
      dremap.target = gen_lowpart (dremap.vmode, dremap.target);
      dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
      dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
    }

  ok = expand_vec_perm_1 (&dremap);
  gcc_assert (ok);

  emit_insn (seq);
  return true;
}
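
/* Illustrative example (added commentary): for V4SImode with
   d->perm = { 0 1 4 5 }, contents has bits 0, 1, 4 and 5 set, which is
   covered by h1 | h3, so dremap becomes the interleave-low { 0 4 1 5 }
   and the final single-operand shuffle is dfinal.perm = { 0 2 1 3 },
   i.e. punpckldq followed by pshufd.  */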
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   a single vector cross-lane permutation into vpermq followed
   by any of the single insn permutations.  */

static bool
expand_vec_perm_vpermq_perm_1 (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dremap, dfinal;
  unsigned i, j, nelt = d->nelt, nelt2 = nelt / 2, nelt4 = nelt / 4;
  unsigned contents[2];
  bool ok;

  if (!(TARGET_AVX2
        && (d->vmode == V32QImode || d->vmode == V16HImode)
        && d->one_operand_p))
    return false;

  contents[0] = 0;
  contents[1] = 0;
  for (i = 0; i < nelt2; ++i)
    {
      contents[0] |= 1u << (d->perm[i] / nelt4);
      contents[1] |= 1u << (d->perm[i + nelt2] / nelt4);
    }

  for (i = 0; i < 2; ++i)
    {
      unsigned int cnt = 0;
      for (j = 0; j < 4; ++j)
        if ((contents[i] & (1u << j)) != 0 && ++cnt > 2)
          return false;
    }

  if (d->testing_p)
    return true;

  dremap = *d;
  dremap.vmode = V4DImode;
  dremap.nelt = 4;
  dremap.target = gen_reg_rtx (V4DImode);
  dremap.op0 = gen_lowpart (V4DImode, d->op0);
  dremap.op1 = dremap.op0;
  dremap.one_operand_p = true;
  for (i = 0; i < 2; ++i)
    {
      unsigned int cnt = 0;
      for (j = 0; j < 4; ++j)
        if ((contents[i] & (1u << j)) != 0)
          dremap.perm[2 * i + cnt++] = j;
      for (; cnt < 2; ++cnt)
        dremap.perm[2 * i + cnt] = 0;
    }

  dfinal = *d;
  dfinal.op0 = gen_lowpart (dfinal.vmode, dremap.target);
  dfinal.op1 = dfinal.op0;
  dfinal.one_operand_p = true;
  for (i = 0, j = 0; i < nelt; ++i)
    {
      if (i == nelt2)
        j = 2;
      dfinal.perm[i] = (d->perm[i] & (nelt4 - 1)) | (j ? nelt2 : 0);
      if ((d->perm[i] / nelt4) == dremap.perm[j])
        ;
      else if ((d->perm[i] / nelt4) == dremap.perm[j + 1])
        dfinal.perm[i] |= nelt4;
      else
        gcc_unreachable ();
    }

  ok = expand_vec_perm_1 (&dremap);
  gcc_assert (ok);

  ok = expand_vec_perm_1 (&dfinal);
  gcc_assert (ok);

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to expand
   a vector permutation using two instructions, vperm2f128 resp.
   vperm2i128 followed by any single in-lane permutation.  */

static bool
expand_vec_perm_vperm2f128 (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dfirst, dsecond;
  unsigned i, j, nelt = d->nelt, nelt2 = nelt / 2, perm;
  bool ok;

  if (!TARGET_AVX
      || GET_MODE_SIZE (d->vmode) != 32
      || (d->vmode != V8SFmode && d->vmode != V4DFmode && !TARGET_AVX2))
    return false;

  dsecond = *d;
  dsecond.one_operand_p = false;
  dsecond.testing_p = true;

  /* ((perm << 2)|perm) & 0x33 is the vperm2[fi]128
     immediate.  For perm < 16 the second permutation uses
     d->op0 as first operand, for perm >= 16 it uses d->op1
     as first operand.  The second operand is the result of
     vperm2[fi]128.  */
  for (perm = 0; perm < 32; perm++)
    {
      /* Ignore permutations which do not move anything cross-lane.  */
      if (perm < 16)
        {
          /* The second shuffle for e.g. V4DFmode has
             0123 and ABCD operands.
             Ignore AB23, as 23 is already in the second lane
             of the first operand.  */
          if ((perm & 0xc) == (1 << 2)) continue;
          /* And 01CD, as 01 is in the first lane of the first
             operand.  */
          if ((perm & 3) == 0) continue;
          /* And 4567, as then the vperm2[fi]128 doesn't change
             anything on the original 4567 second operand.  */
          if ((perm & 0xf) == ((3 << 2) | 2)) continue;
        }
      else
        {
          /* The second shuffle for e.g. V4DFmode has
             4567 and ABCD operands.
             Ignore AB67, as 67 is already in the second lane
             of the first operand.  */
          if ((perm & 0xc) == (3 << 2)) continue;
          /* And 45CD, as 45 is in the first lane of the first
             operand.  */
          if ((perm & 3) == 2) continue;
          /* And 0123, as then the vperm2[fi]128 doesn't change
             anything on the original 0123 first operand.  */
          if ((perm & 0xf) == (1 << 2)) continue;
        }

      for (i = 0; i < nelt; i++)
        {
          j = d->perm[i] / nelt2;
          if (j == ((perm >> (2 * (i >= nelt2))) & 3))
            dsecond.perm[i] = nelt + (i & nelt2) + (d->perm[i] & (nelt2 - 1));
          else if (j == (unsigned) (i >= nelt2) + 2 * (perm >= 16))
            dsecond.perm[i] = d->perm[i] & (nelt - 1);
          else
            break;
        }

      if (i == nelt)
        {
          start_sequence ();
          ok = expand_vec_perm_1 (&dsecond);
          end_sequence ();
        }
      else
        ok = false;

      if (ok)
        {
          if (d->testing_p)
            return true;

          /* Found a usable second shuffle.  dfirst will be
             vperm2f128 on d->op0 and d->op1.  */
          dsecond.testing_p = false;
          dfirst = *d;
          dfirst.target = gen_reg_rtx (d->vmode);
          for (i = 0; i < nelt; i++)
            dfirst.perm[i] = (i & (nelt2 - 1))
                             + ((perm >> (2 * (i >= nelt2))) & 3) * nelt2;

          ok = expand_vec_perm_1 (&dfirst);
          gcc_assert (ok);

          /* And dsecond is some single insn shuffle, taking
             d->op0 and result of vperm2f128 (if perm < 16) or
             d->op1 and result of vperm2f128 (otherwise).  */
          dsecond.op1 = dfirst.target;
          if (perm >= 16)
            dsecond.op0 = dfirst.op1;

          ok = expand_vec_perm_1 (&dsecond);
          gcc_assert (ok);

          return true;
        }

      /* For one operand, the only useful vperm2f128 permutation is 0x10.  */
      if (d->one_operand_p)
        return false;
    }

  return false;
}
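
/* Illustrative note (added commentary): in the vperm2[fi]128 immediate,
   bits 0-1 select the source of the result's low lane and bits 4-5 the
   source of its high lane, each from { op0.lo, op0.hi, op1.lo, op1.hi }
   numbered 0-3; e.g. 0x20 produces { op0.lo, op1.lo } and 0x31 produces
   { op0.hi, op1.hi }, as also used by the even/odd expanders below.  */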
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to simplify
   a two vector permutation using 2 intra-lane interleave insns
   and cross-lane shuffle for 32-byte vectors.  */

static bool
expand_vec_perm_interleave3 (struct expand_vec_perm_d *d)
{
  unsigned i, nelt;
  rtx (*gen) (rtx, rtx, rtx);

  if (d->one_operand_p)
    return false;
  if (TARGET_AVX2 && GET_MODE_SIZE (d->vmode) == 32)
    ;
  else if (TARGET_AVX && (d->vmode == V8SFmode || d->vmode == V4DFmode))
    ;
  else
    return false;

  nelt = d->nelt;
  if (d->perm[0] != 0 && d->perm[0] != nelt / 2)
    return false;
  for (i = 0; i < nelt; i += 2)
    if (d->perm[i] != d->perm[0] + i / 2
        || d->perm[i + 1] != d->perm[0] + i / 2 + nelt)
      return false;

  if (d->testing_p)
    return true;

  switch (d->vmode)
    {
    case V32QImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv32qi;
      else
        gen = gen_vec_interleave_lowv32qi;
      break;
    case V16HImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv16hi;
      else
        gen = gen_vec_interleave_lowv16hi;
      break;
    case V8SImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv8si;
      else
        gen = gen_vec_interleave_lowv8si;
      break;
    case V4DImode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv4di;
      else
        gen = gen_vec_interleave_lowv4di;
      break;
    case V8SFmode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv8sf;
      else
        gen = gen_vec_interleave_lowv8sf;
      break;
    case V4DFmode:
      if (d->perm[0])
        gen = gen_vec_interleave_highv4df;
      else
        gen = gen_vec_interleave_lowv4df;
      break;
    default:
      gcc_unreachable ();
    }

  emit_insn (gen (d->target, d->op0, d->op1));
  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Try to implement
   a single vector permutation using a single intra-lane vector
   permutation, vperm2f128 swapping the lanes and vblend* insn blending
   the non-swapped and swapped vectors together.  */

static bool
expand_vec_perm_vperm2f128_vblend (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dfirst, dsecond;
  unsigned i, j, msk, nelt = d->nelt, nelt2 = nelt / 2;
  rtx seq;
  bool ok;
  rtx (*blend) (rtx, rtx, rtx, rtx) = NULL;

  if (!TARGET_AVX
      || TARGET_AVX2
      || (d->vmode != V8SFmode && d->vmode != V4DFmode)
      || !d->one_operand_p)
    return false;

  dfirst = *d;
  for (i = 0; i < nelt; i++)
    dfirst.perm[i] = 0xff;
  for (i = 0, msk = 0; i < nelt; i++)
    {
      j = (d->perm[i] & nelt2) ? i | nelt2 : i & ~nelt2;
      if (dfirst.perm[j] != 0xff && dfirst.perm[j] != d->perm[i])
        return false;
      dfirst.perm[j] = d->perm[i];
      if (j != i)
        msk |= (1 << i);
    }
  for (i = 0; i < nelt; i++)
    if (dfirst.perm[i] == 0xff)
      dfirst.perm[i] = i;

  dfirst.target = gen_reg_rtx (dfirst.vmode);

  start_sequence ();
  ok = expand_vec_perm_1 (&dfirst);
  seq = get_insns ();
  end_sequence ();

  if (!ok)
    return false;

  if (d->testing_p)
    return true;

  emit_insn (seq);

  dsecond = *d;
  dsecond.op0 = dfirst.target;
  dsecond.op1 = dfirst.target;
  dsecond.one_operand_p = true;
  dsecond.target = gen_reg_rtx (dsecond.vmode);
  for (i = 0; i < nelt; i++)
    dsecond.perm[i] = i ^ nelt2;

  ok = expand_vec_perm_1 (&dsecond);
  gcc_assert (ok);

  blend = d->vmode == V8SFmode ? gen_avx_blendps256 : gen_avx_blendpd256;
  emit_insn (blend (d->target, dfirst.target, dsecond.target, GEN_INT (msk)));
  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Implement a V4DF
   permutation using two vperm2f128, followed by a vshufpd insn blending
   the two vectors together.  */

static bool
expand_vec_perm_2vperm2f128_vshuf (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dfirst, dsecond, dthird;
  bool ok;

  if (!TARGET_AVX || (d->vmode != V4DFmode))
    return false;

  if (d->testing_p)
    return true;

  dfirst = *d;
  dsecond = *d;
  dthird = *d;

  dfirst.perm[0] = (d->perm[0] & ~1);
  dfirst.perm[1] = (d->perm[0] & ~1) + 1;
  dfirst.perm[2] = (d->perm[2] & ~1);
  dfirst.perm[3] = (d->perm[2] & ~1) + 1;
  dsecond.perm[0] = (d->perm[1] & ~1);
  dsecond.perm[1] = (d->perm[1] & ~1) + 1;
  dsecond.perm[2] = (d->perm[3] & ~1);
  dsecond.perm[3] = (d->perm[3] & ~1) + 1;
  dthird.perm[0] = (d->perm[0] % 2);
  dthird.perm[1] = (d->perm[1] % 2) + 4;
  dthird.perm[2] = (d->perm[2] % 2) + 2;
  dthird.perm[3] = (d->perm[3] % 2) + 6;

  dfirst.target = gen_reg_rtx (dfirst.vmode);
  dsecond.target = gen_reg_rtx (dsecond.vmode);
  dthird.op0 = dfirst.target;
  dthird.op1 = dsecond.target;
  dthird.one_operand_p = false;

  canonicalize_perm (&dfirst);
  canonicalize_perm (&dsecond);

  ok = expand_vec_perm_1 (&dfirst)
       && expand_vec_perm_1 (&dsecond)
       && expand_vec_perm_1 (&dthird);

  gcc_assert (ok);

  return true;
}
/* A subroutine of expand_vec_perm_even_odd_1.  Implement the double-word
   permutation with two pshufb insns and an ior.  We should have already
   failed all two instruction sequences.  */

static bool
expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
{
  rtx rperm[2][16], vperm, l, h, op, m128;
  unsigned int i, nelt, eltsz;

  if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
    return false;
  gcc_assert (!d->one_operand_p);

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate two permutation masks.  If the required element is within
     the given vector it is shuffled into the proper lane.  If the required
     element is in the other vector, force a zero into the lane by setting
     bit 7 in the permutation mask.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i];
      unsigned which = (e >= nelt);
      if (which)
        e -= nelt;

      for (j = 0; j < eltsz; ++j)
        {
          rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
          rperm[1-which][i*eltsz + j] = m128;
        }
    }

  vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
  vperm = force_reg (V16QImode, vperm);

  l = gen_reg_rtx (V16QImode);
  op = gen_lowpart (V16QImode, d->op0);
  emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));

  vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
  vperm = force_reg (V16QImode, vperm);

  h = gen_reg_rtx (V16QImode);
  op = gen_lowpart (V16QImode, d->op1);
  emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));

  op = gen_lowpart (V16QImode, d->target);
  emit_insn (gen_iorv16qi3 (op, l, h));

  return true;
}
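
/* Illustrative example (added commentary): for an extract-even V16QImode
   permutation { 0 2 4 ... 30 }, the first mask above becomes
   { 0 2 4 ... 14 -128 ... -128 } (selecting the even bytes of op0 and
   zeroing the rest) and the second { -128 ... -128 0 2 ... 14 }
   (selecting the even bytes of op1), so the final por merges the two
   half results into the target.  */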
/* Implement arbitrary permutation of one V32QImode and V16QImode operand
   with two vpshufb insns, vpermq and vpor.  We should have already failed
   all two or three instruction sequences.  */

static bool
expand_vec_perm_vpshufb2_vpermq (struct expand_vec_perm_d *d)
{
  rtx rperm[2][32], vperm, l, h, hp, op, m128;
  unsigned int i, nelt, eltsz;

  if (!TARGET_AVX2
      || !d->one_operand_p
      || (d->vmode != V32QImode && d->vmode != V16HImode))
    return false;

  if (d->testing_p)
    return true;

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate two permutation masks.  If the required element is within
     the same lane, it is shuffled in.  If the required element from the
     other lane, force a zero by setting bit 7 in the permutation mask.
     In the other mask the mask has non-negative elements if element
     is requested from the other lane, but also moved to the other lane,
     so that the result of vpshufb can have the two V2TImode halves
     swapped.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i] & (nelt / 2 - 1);
      unsigned which = ((d->perm[i] ^ i) & (nelt / 2)) * eltsz;

      for (j = 0; j < eltsz; ++j)
        {
          rperm[!!which][(i * eltsz + j) ^ which] = GEN_INT (e * eltsz + j);
          rperm[!which][(i * eltsz + j) ^ (which ^ 16)] = m128;
        }
    }

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[1]));
  vperm = force_reg (V32QImode, vperm);

  h = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op0);
  emit_insn (gen_avx2_pshufbv32qi3 (h, op, vperm));

  /* Swap the 128-bit lanes of h into hp.  */
  hp = gen_reg_rtx (V4DImode);
  op = gen_lowpart (V4DImode, h);
  emit_insn (gen_avx2_permv4di_1 (hp, op, const2_rtx, GEN_INT (3), const0_rtx,
                                  const1_rtx));

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[0]));
  vperm = force_reg (V32QImode, vperm);

  l = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op0);
  emit_insn (gen_avx2_pshufbv32qi3 (l, op, vperm));

  op = gen_lowpart (V32QImode, d->target);
  emit_insn (gen_iorv32qi3 (op, l, gen_lowpart (V32QImode, hp)));

  return true;
}
/* A subroutine of expand_vec_perm_even_odd_1.  Implement extract-even
   and extract-odd permutations of two V32QImode and V16QImode operand
   with two vpshufb insns, vpor and vpermq.  We should have already
   failed all two or three instruction sequences.  */

static bool
expand_vec_perm_vpshufb2_vpermq_even_odd (struct expand_vec_perm_d *d)
{
  rtx rperm[2][32], vperm, l, h, ior, op, m128;
  unsigned int i, nelt, eltsz;

  if (!TARGET_AVX2
      || d->one_operand_p
      || (d->vmode != V32QImode && d->vmode != V16HImode))
    return false;

  for (i = 0; i < d->nelt; ++i)
    if ((d->perm[i] ^ (i * 2)) & (3 * d->nelt / 2))
      return false;

  if (d->testing_p)
    return true;

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate two permutation masks.  In the first permutation mask
     the first quarter will contain indexes for the first half
     of the op0, the second quarter will contain bit 7 set, third quarter
     will contain indexes for the second half of the op0 and the
     last quarter bit 7 set.  In the second permutation mask
     the first quarter will contain bit 7 set, the second quarter
     indexes for the first half of the op1, the third quarter bit 7 set
     and last quarter indexes for the second half of the op1.
     I.e. the first mask e.g. for V32QImode extract even will be:
     0, 2, ..., 0xe, -128, ..., -128, 0, 2, ..., 0xe, -128, ..., -128
     (all values masked with 0xf except for -128) and second mask
     for extract even will be
     -128, ..., -128, 0, 2, ..., 0xe, -128, ..., -128, 0, 2, ..., 0xe.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i] & (nelt / 2 - 1);
      unsigned which = d->perm[i] >= nelt;
      unsigned xorv = (i >= nelt / 4 && i < 3 * nelt / 4) ? 24 : 0;

      for (j = 0; j < eltsz; ++j)
        {
          rperm[which][(i * eltsz + j) ^ xorv] = GEN_INT (e * eltsz + j);
          rperm[1 - which][(i * eltsz + j) ^ xorv] = m128;
        }
    }

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[0]));
  vperm = force_reg (V32QImode, vperm);

  l = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op0);
  emit_insn (gen_avx2_pshufbv32qi3 (l, op, vperm));

  vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[1]));
  vperm = force_reg (V32QImode, vperm);

  h = gen_reg_rtx (V32QImode);
  op = gen_lowpart (V32QImode, d->op1);
  emit_insn (gen_avx2_pshufbv32qi3 (h, op, vperm));

  ior = gen_reg_rtx (V32QImode);
  emit_insn (gen_iorv32qi3 (ior, l, h));

  /* Permute the V4DImode quarters using { 0, 2, 1, 3 } permutation.  */
  op = gen_lowpart (V4DImode, d->target);
  ior = gen_lowpart (V4DImode, ior);
  emit_insn (gen_avx2_permv4di_1 (op, ior, const0_rtx, const2_rtx,
                                  const1_rtx, GEN_INT (3)));

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Implement extract-even
   and extract-odd permutations.  */

static bool
expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
{
  rtx t1, t2, t3;

  switch (d->vmode)
    {
    case V4DFmode:
      t1 = gen_reg_rtx (V4DFmode);
      t2 = gen_reg_rtx (V4DFmode);

      /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }.  */
      emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
      emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));

      /* Now an unpck[lh]pd will produce the result required.  */
      if (odd)
        t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
      else
        t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
      emit_insn (t3);
      break;

    case V8SFmode:
      {
        int mask = odd ? 0xdd : 0x88;

        t1 = gen_reg_rtx (V8SFmode);
        t2 = gen_reg_rtx (V8SFmode);
        t3 = gen_reg_rtx (V8SFmode);

        /* Shuffle within the 128-bit lanes to produce:
           { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }.  */
        emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
                                      GEN_INT (mask)));

        /* Shuffle the lanes around to produce:
           { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }.  */
        emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
                                            GEN_INT (0x3)));

        /* Shuffle within the 128-bit lanes to produce:
           { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }.  */
        emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));

        /* Shuffle within the 128-bit lanes to produce:
           { 8 a c e c e 8 a } | { 9 b d f d f 9 b }.  */
        emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));

        /* Shuffle the lanes around to produce:
           { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }.  */
        emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
                                            GEN_INT (0x20)));
      }
      break;

    case V2DFmode:
    case V4SFmode:
    case V2DImode:
    case V4SImode:
      /* These are always directly implementable by expand_vec_perm_1.  */
      gcc_unreachable ();

    case V8HImode:
      if (TARGET_SSSE3)
        return expand_vec_perm_pshufb2 (d);
      else
        {
          /* We need 2*log2(N)-1 operations to achieve odd/even
             with interleave. */
          t1 = gen_reg_rtx (V8HImode);
          t2 = gen_reg_rtx (V8HImode);
          emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
          emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
          emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
          emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
          if (odd)
            t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
          else
            t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
          emit_insn (t3);
        }
      break;

    case V16QImode:
      if (TARGET_SSSE3)
        return expand_vec_perm_pshufb2 (d);
      else
        {
          t1 = gen_reg_rtx (V16QImode);
          t2 = gen_reg_rtx (V16QImode);
          t3 = gen_reg_rtx (V16QImode);
          emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
          emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
          emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
          emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
          emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
          emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
          if (odd)
            t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
          else
            t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
          emit_insn (t3);
        }
      break;

    case V16HImode:
    case V32QImode:
      return expand_vec_perm_vpshufb2_vpermq_even_odd (d);

    case V4DImode:
      if (!TARGET_AVX2)
        {
          struct expand_vec_perm_d d_copy = *d;
          d_copy.vmode = V4DFmode;
          d_copy.target = gen_lowpart (V4DFmode, d->target);
          d_copy.op0 = gen_lowpart (V4DFmode, d->op0);
          d_copy.op1 = gen_lowpart (V4DFmode, d->op1);
          return expand_vec_perm_even_odd_1 (&d_copy, odd);
        }

      t1 = gen_reg_rtx (V4DImode);
      t2 = gen_reg_rtx (V4DImode);

      /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }.  */
      emit_insn (gen_avx2_permv2ti (t1, d->op0, d->op1, GEN_INT (0x20)));
      emit_insn (gen_avx2_permv2ti (t2, d->op0, d->op1, GEN_INT (0x31)));

      /* Now an vpunpck[lh]qdq will produce the result required.  */
      if (odd)
        t3 = gen_avx2_interleave_highv4di (d->target, t1, t2);
      else
        t3 = gen_avx2_interleave_lowv4di (d->target, t1, t2);
      emit_insn (t3);
      break;

    case V8SImode:
      if (!TARGET_AVX2)
        {
          struct expand_vec_perm_d d_copy = *d;
          d_copy.vmode = V8SFmode;
          d_copy.target = gen_lowpart (V8SFmode, d->target);
          d_copy.op0 = gen_lowpart (V8SFmode, d->op0);
          d_copy.op1 = gen_lowpart (V8SFmode, d->op1);
          return expand_vec_perm_even_odd_1 (&d_copy, odd);
        }

      t1 = gen_reg_rtx (V8SImode);
      t2 = gen_reg_rtx (V8SImode);

      /* Shuffle the lanes around into
         { 0 1 2 3 8 9 a b } and { 4 5 6 7 c d e f }.  */
      emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, t1),
                                    gen_lowpart (V4DImode, d->op0),
                                    gen_lowpart (V4DImode, d->op1),
                                    GEN_INT (0x20)));
      emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, t2),
                                    gen_lowpart (V4DImode, d->op0),
                                    gen_lowpart (V4DImode, d->op1),
                                    GEN_INT (0x31)));

      /* Swap the 2nd and 3rd position in each lane into
         { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }.  */
      emit_insn (gen_avx2_pshufdv3 (t1, t1,
                                    GEN_INT (2 * 4 + 1 * 16 + 3 * 64)));
      emit_insn (gen_avx2_pshufdv3 (t2, t2,
                                    GEN_INT (2 * 4 + 1 * 16 + 3 * 64)));

      /* Now an vpunpck[lh]qdq will produce
         { 0 2 4 6 8 a c e } resp. { 1 3 5 7 9 b d f }.  */
      if (odd)
        t3 = gen_avx2_interleave_highv4di (gen_lowpart (V4DImode, d->target),
                                           gen_lowpart (V4DImode, t1),
                                           gen_lowpart (V4DImode, t2));
      else
        t3 = gen_avx2_interleave_lowv4di (gen_lowpart (V4DImode, d->target),
                                          gen_lowpart (V4DImode, t1),
                                          gen_lowpart (V4DImode, t2));
      emit_insn (t3);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Pattern match
   extract-even and extract-odd permutations.  */

static bool
expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
{
  unsigned i, odd, nelt = d->nelt;

  odd = d->perm[0];
  if (odd != 0 && odd != 1)
    return false;

  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != 2 * i + odd)
      return false;

  return expand_vec_perm_even_odd_1 (d, odd);
}
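
/* Illustrative example (added commentary): d->perm = { 1 3 5 7 } on
   V4SImode sets odd = 1 and satisfies d->perm[i] == 2 * i + 1 for all i,
   so the request is routed to expand_vec_perm_even_odd_1 as an
   extract-odd operation.  */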
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Implement broadcast
   permutations.  We assume that expand_vec_perm_1 has already failed.  */

static bool
expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
{
  unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
  enum machine_mode vmode = d->vmode;
  unsigned char perm2[4];
  rtx op0 = d->op0;
  bool ok;

  switch (vmode)
    {
    case V4DFmode:
    case V8SFmode:
      /* These are special-cased in sse.md so that we can optionally
         use the vbroadcast instruction.  They expand to two insns
         if the input happens to be in a register.  */
      gcc_unreachable ();

    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      /* These are always implementable using standard shuffle patterns.  */
      gcc_unreachable ();

    case V8HImode:
    case V16QImode:
      /* These can be implemented via interleave.  We save one insn by
         stopping once we have promoted to V4SImode and then use pshufd.  */
      do
        {
          rtx dest;
          rtx (*gen) (rtx, rtx, rtx)
            = vmode == V16QImode ? gen_vec_interleave_lowv16qi
                                 : gen_vec_interleave_lowv8hi;

          if (elt >= nelt2)
            {
              gen = vmode == V16QImode ? gen_vec_interleave_highv16qi
                                       : gen_vec_interleave_highv8hi;
              elt -= nelt2;
            }
          nelt2 /= 2;

          dest = gen_reg_rtx (vmode);
          emit_insn (gen (dest, op0, op0));
          vmode = get_mode_wider_vector (vmode);
          op0 = gen_lowpart (vmode, dest);
        }
      while (vmode != V4SImode);

      memset (perm2, elt, 4);
      ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4,
                           d->testing_p);
      gcc_assert (ok);
      return true;

    case V32QImode:
    case V16HImode:
    case V8SImode:
    case V4DImode:
      /* For AVX2 broadcasts of the first element vpbroadcast* or
         vpermq should be used by expand_vec_perm_1.  */
      gcc_assert (!TARGET_AVX2 || d->perm[0]);
      return false;

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_vec_perm_builtin_1.  Pattern match
   broadcast permutations.  */

static bool
expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
{
  unsigned i, elt, nelt = d->nelt;

  if (!d->one_operand_p)
    return false;

  elt = d->perm[0];
  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != elt)
      return false;

  return expand_vec_perm_broadcast_1 (d);
}
/* Implement arbitrary permutation of two V32QImode and V16QImode operands
   with 4 vpshufb insns, 2 vpermq and 3 vpor.  We should have already failed
   all the shorter instruction sequences.  */

static bool
expand_vec_perm_vpshufb4_vpermq2 (struct expand_vec_perm_d *d)
{
  rtx rperm[4][32], vperm, l[2], h[2], op, m128;
  unsigned int i, nelt, eltsz;
  bool used[4];

  if (!TARGET_AVX2
      || d->one_operand_p
      || (d->vmode != V32QImode && d->vmode != V16HImode))
    return false;

  if (d->testing_p)
    return true;

  nelt = d->nelt;
  eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));

  /* Generate 4 permutation masks.  If the required element is within
     the same lane, it is shuffled in.  If the required element from the
     other lane, force a zero by setting bit 7 in the permutation mask.
     In the other mask the mask has non-negative elements if element
     is requested from the other lane, but also moved to the other lane,
     so that the result of vpshufb can have the two V2TImode halves
     swapped.  */
  m128 = GEN_INT (-128);
  for (i = 0; i < 32; ++i)
    {
      rperm[0][i] = m128;
      rperm[1][i] = m128;
      rperm[2][i] = m128;
      rperm[3][i] = m128;
    }
  used[0] = false;
  used[1] = false;
  used[2] = false;
  used[3] = false;
  for (i = 0; i < nelt; ++i)
    {
      unsigned j, e = d->perm[i] & (nelt / 2 - 1);
      unsigned xlane = ((d->perm[i] ^ i) & (nelt / 2)) * eltsz;
      unsigned int which = ((d->perm[i] & nelt) ? 2 : 0) + (xlane ? 1 : 0);

      for (j = 0; j < eltsz; ++j)
        rperm[which][(i * eltsz + j) ^ xlane] = GEN_INT (e * eltsz + j);
      used[which] = true;
    }

  for (i = 0; i < 2; ++i)
    {
      if (!used[2 * i + 1])
        {
          h[i] = NULL_RTX;
          continue;
        }
      vperm = gen_rtx_CONST_VECTOR (V32QImode,
                                    gen_rtvec_v (32, rperm[2 * i + 1]));
      vperm = force_reg (V32QImode, vperm);
      h[i] = gen_reg_rtx (V32QImode);
      op = gen_lowpart (V32QImode, i ? d->op1 : d->op0);
      emit_insn (gen_avx2_pshufbv32qi3 (h[i], op, vperm));
    }

  /* Swap the 128-bit lanes of h[X].  */
  for (i = 0; i < 2; ++i)
    {
      if (h[i] == NULL_RTX)
        continue;
      op = gen_reg_rtx (V4DImode);
      emit_insn (gen_avx2_permv4di_1 (op, gen_lowpart (V4DImode, h[i]),
                                      const2_rtx, GEN_INT (3), const0_rtx,
                                      const1_rtx));
      h[i] = gen_lowpart (V32QImode, op);
    }

  for (i = 0; i < 2; ++i)
    {
      if (!used[2 * i])
        {
          l[i] = NULL_RTX;
          continue;
        }
      vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[2 * i]));
      vperm = force_reg (V32QImode, vperm);
      l[i] = gen_reg_rtx (V32QImode);
      op = gen_lowpart (V32QImode, i ? d->op1 : d->op0);
      emit_insn (gen_avx2_pshufbv32qi3 (l[i], op, vperm));
    }

  for (i = 0; i < 2; ++i)
    {
      if (h[i] && l[i])
        {
          op = gen_reg_rtx (V32QImode);
          emit_insn (gen_iorv32qi3 (op, l[i], h[i]));
          l[i] = op;
        }
      else if (h[i])
        l[i] = h[i];
    }

  gcc_assert (l[0] && l[1]);
  op = gen_lowpart (V32QImode, d->target);
  emit_insn (gen_iorv32qi3 (op, l[0], l[1]));
  return true;
}
/* The guts of ix86_expand_vec_perm_const, also used by the ok hook.
   With all of the interface bits taken care of, perform the expansion
   in D and return true on success.  */

static bool
ix86_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  /* Try a single instruction expansion.  */
  if (expand_vec_perm_1 (d))
    return true;

  /* Try sequences of two instructions.  */

  if (expand_vec_perm_pshuflw_pshufhw (d))
    return true;

  if (expand_vec_perm_palignr (d))
    return true;

  if (expand_vec_perm_interleave2 (d))
    return true;

  if (expand_vec_perm_broadcast (d))
    return true;

  if (expand_vec_perm_vpermq_perm_1 (d))
    return true;

  if (expand_vec_perm_vperm2f128 (d))
    return true;

  /* Try sequences of three instructions.  */

  if (expand_vec_perm_2vperm2f128_vshuf (d))
    return true;

  if (expand_vec_perm_pshufb2 (d))
    return true;

  if (expand_vec_perm_interleave3 (d))
    return true;

  if (expand_vec_perm_vperm2f128_vblend (d))
    return true;

  /* Try sequences of four instructions.  */

  if (expand_vec_perm_vpshufb2_vpermq (d))
    return true;

  if (expand_vec_perm_vpshufb2_vpermq_even_odd (d))
    return true;

  /* ??? Look for narrow permutations whose element orderings would
     allow the promotion to a wider mode.  */

  /* ??? Look for sequences of interleave or a wider permute that place
     the data into the correct lanes for a half-vector shuffle like
     pshuf[lh]w or vpermilps.  */

  /* ??? Look for sequences of interleave that produce the desired results.
     The combinatorics of punpck[lh] get pretty ugly... */

  if (expand_vec_perm_even_odd (d))
    return true;

  /* Even longer sequences.  */
  if (expand_vec_perm_vpshufb4_vpermq2 (d))
    return true;

  return false;
}
/* If a permutation only uses one operand, make it clear.  Returns true
   if the permutation references both operands.  */

static bool
canonicalize_perm (struct expand_vec_perm_d *d)
{
  int i, which, nelt = d->nelt;

  for (i = which = 0; i < nelt; ++i)
    which |= (d->perm[i] < nelt ? 1 : 2);

  d->one_operand_p = true;
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      if (!rtx_equal_p (d->op0, d->op1))
        {
          d->one_operand_p = false;
          break;
        }
      /* The elements of PERM do not suggest that only the first operand
         is used, but both operands are identical.  Allow easier matching
         of the permutation by folding the permutation into the single
         input vector.  */
      /* FALLTHRU */

    case 2:
      for (i = 0; i < nelt; ++i)
        d->perm[i] &= nelt - 1;
      d->op0 = d->op1;
      break;

    case 1:
      d->op1 = d->op0;
      break;
    }

  return (which == 3);
}
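
/* Illustrative example (added commentary): with nelt == 4, the selector
   { 5 6 4 7 } references only the second operand (which == 2), so it is
   folded to { 1 2 0 3 } with op0 = op1 and one_operand_p set; only a
   selector mixing indexes below and above nelt makes this return
   true.  */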
bool
ix86_expand_vec_perm_const (rtx operands[4])
{
  struct expand_vec_perm_d d;
  unsigned char perm[MAX_VECT_LEN];
  int i, nelt;
  bool two_args;
  rtx sel;

  d.target = operands[0];
  d.op0 = operands[1];
  d.op1 = operands[2];
  sel = operands[3];

  d.vmode = GET_MODE (d.target);
  gcc_assert (VECTOR_MODE_P (d.vmode));
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  gcc_assert (GET_CODE (sel) == CONST_VECTOR);
  gcc_assert (XVECLEN (sel, 0) == nelt);
  gcc_checking_assert (sizeof (d.perm) == sizeof (perm));

  for (i = 0; i < nelt; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      int ei = INTVAL (e) & (2 * nelt - 1);
      d.perm[i] = ei;
      perm[i] = ei;
    }

  two_args = canonicalize_perm (&d);

  if (ix86_expand_vec_perm_const_1 (&d))
    return true;

  /* If the selector says both arguments are needed, but the operands are the
     same, the above tried to expand with one_operand_p and flattened selector.
     If that didn't work, retry without one_operand_p; we succeeded with that
     during testing.  */
  if (two_args && d.one_operand_p)
    {
      d.one_operand_p = false;
      memcpy (d.perm, perm, sizeof (perm));
      return ix86_expand_vec_perm_const_1 (&d);
    }

  return false;
}
/* Implement targetm.vectorize.vec_perm_const_ok.  */

static bool
ix86_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                  const unsigned char *sel)
{
  struct expand_vec_perm_d d;
  unsigned int i, nelt, which;
  bool ret;

  d.vmode = vmode;
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;

  /* Given sufficient ISA support we can just return true here
     for selected vector modes.  */
  if (GET_MODE_SIZE (d.vmode) == 16)
    {
      /* All implementable with a single vpperm insn.  */
      if (TARGET_XOP)
        return true;
      /* All implementable with 2 pshufb + 1 ior.  */
      if (TARGET_SSSE3)
        return true;
      /* All implementable with shufpd or unpck[lh]pd.  */
      if (d.nelt == 2)
        return true;
    }

  /* Extract the values from the vector CST into the permutation
     array in D.  */
  memcpy (d.perm, sel, nelt);
  for (i = which = 0; i < nelt; ++i)
    {
      unsigned char e = d.perm[i];
      gcc_assert (e < 2 * nelt);
      which |= (e < nelt ? 1 : 2);
    }

  /* For all elements from second vector, fold the elements to first.  */
  if (which == 2)
    for (i = 0; i < nelt; ++i)
      d.perm[i] -= nelt;

  /* Check whether the mask can be applied to the vector type.  */
  d.one_operand_p = (which != 3);

  /* Implementable with shufps or pshufd.  */
  if (d.one_operand_p && (d.vmode == V4SFmode || d.vmode == V4SImode))
    return true;

  /* Otherwise we have to go through the motions and see if we can
     figure out how to generate the requested permutation.  */
  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!d.one_operand_p)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  start_sequence ();
  ret = ix86_expand_vec_perm_const_1 (&d);
  end_sequence ();

  return ret;
}
void
ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.one_operand_p = false;
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  /* We'll either be able to implement the permutation directly...  */
  if (expand_vec_perm_1 (&d))
    return;

  /* ... or we use the special-case patterns.  */
  expand_vec_perm_even_odd_1 (&d, odd);
}
static void
ix86_expand_vec_interleave (rtx targ, rtx op0, rtx op1, bool high_p)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt, base;
  bool ok;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.one_operand_p = false;
  d.testing_p = false;

  base = high_p ? nelt / 2 : 0;
  for (i = 0; i < nelt / 2; ++i)
    {
      d.perm[i * 2] = i + base;
      d.perm[i * 2 + 1] = i + base + nelt;
    }

  /* Note that for AVX this isn't one instruction.  */
  ok = ix86_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
/* Expand a vector operation CODE for a V*QImode in terms of the
   same operation on V*HImode.  */

void
ix86_expand_vecop_qihi (enum rtx_code code, rtx dest, rtx op1, rtx op2)
{
  enum machine_mode qimode = GET_MODE (dest);
  enum machine_mode himode;
  rtx (*gen_il) (rtx, rtx, rtx);
  rtx (*gen_ih) (rtx, rtx, rtx);
  rtx op1_l, op1_h, op2_l, op2_h, res_l, res_h;
  struct expand_vec_perm_d d;
  bool ok, full_interleave;
  bool uns_p = false;
  int i;

  switch (qimode)
    {
    case V16QImode:
      himode = V8HImode;
      gen_il = gen_vec_interleave_lowv16qi;
      gen_ih = gen_vec_interleave_highv16qi;
      break;
    case V32QImode:
      himode = V16HImode;
      gen_il = gen_avx2_interleave_lowv32qi;
      gen_ih = gen_avx2_interleave_highv32qi;
      break;
    default:
      gcc_unreachable ();
    }

  op2_l = op2_h = op2;
  switch (code)
    {
    case MULT:
      /* Unpack data such that we've got a source byte in each low byte of
         each word.  We don't care what goes into the high byte of each word.
         Rather than trying to get zero in there, most convenient is to let
         it be a copy of the low byte.  */
      op2_l = gen_reg_rtx (qimode);
      op2_h = gen_reg_rtx (qimode);
      emit_insn (gen_il (op2_l, op2, op2));
      emit_insn (gen_ih (op2_h, op2, op2));
      /* FALLTHRU */

      op1_l = gen_reg_rtx (qimode);
      op1_h = gen_reg_rtx (qimode);
      emit_insn (gen_il (op1_l, op1, op1));
      emit_insn (gen_ih (op1_h, op1, op1));
      full_interleave = qimode == V16QImode;
      break;

    case ASHIFT:
    case LSHIFTRT:
      uns_p = true;
      /* FALLTHRU */
    case ASHIFTRT:
      op1_l = gen_reg_rtx (himode);
      op1_h = gen_reg_rtx (himode);
      ix86_expand_sse_unpack (op1_l, op1, uns_p, false);
      ix86_expand_sse_unpack (op1_h, op1, uns_p, true);
      full_interleave = true;
      break;
    default:
      gcc_unreachable ();
    }

  /* Perform the operation.  */
  res_l = expand_simple_binop (himode, code, op1_l, op2_l, NULL_RTX,
                               uns_p, OPTAB_DIRECT);
  res_h = expand_simple_binop (himode, code, op1_h, op2_h, NULL_RTX,
                               uns_p, OPTAB_DIRECT);
  gcc_assert (res_l && res_h);

  /* Merge the data back into the right place.  */
  d.target = dest;
  d.op0 = gen_lowpart (qimode, res_l);
  d.op1 = gen_lowpart (qimode, res_h);
  d.vmode = qimode;
  d.nelt = GET_MODE_NUNITS (qimode);
  d.one_operand_p = false;
  d.testing_p = false;

  if (full_interleave)
    {
      /* For SSE2, we used an full interleave, so the desired
         results are in the even elements.  */
      for (i = 0; i < 32; ++i)
        d.perm[i] = i * 2;
    }
  else
    {
      /* For AVX, the interleave used above was not cross-lane.  So the
         extraction is evens but with the second and third quarter swapped.
         Happily, that is even one insn shorter than even extraction.  */
      for (i = 0; i < 32; ++i)
        d.perm[i] = i * 2 + ((i & 24) == 8 ? 16 : (i & 24) == 16 ? -16 : 0);
    }

  ok = ix86_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);

  set_unique_reg_note (get_last_insn (), REG_EQUAL,
                       gen_rtx_fmt_ee (code, qimode, op1, op2));
}
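
/* Illustrative note (added commentary): for a V16QImode multiply, the
   code above interleaves each operand with itself so that every word of
   the V8HImode values carries one source byte in its low half; after the
   V8HImode multiply, the low byte of each word holds the wanted product,
   which is why the SSE2 merge uses d.perm[i] = i * 2 to pick the even
   bytes.  */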
void
ix86_expand_mul_widen_evenodd (rtx dest, rtx op1, rtx op2,
                               bool uns_p, bool odd_p)
{
  enum machine_mode mode = GET_MODE (op1);
  enum machine_mode wmode = GET_MODE (dest);
  rtx x;

  /* We only play even/odd games with vectors of SImode.  */
  gcc_assert (mode == V4SImode || mode == V8SImode);

  /* If we're looking for the odd results, shift those members down to
     the even slots.  For some cpus this is faster than a PSHUFD.  */
  if (odd_p)
    {
      if (TARGET_XOP && mode == V4SImode)
        {
          x = force_reg (wmode, CONST0_RTX (wmode));
          emit_insn (gen_xop_pmacsdqh (dest, op1, op2, x));
          return;
        }

      x = GEN_INT (GET_MODE_UNIT_BITSIZE (mode));
      op1 = expand_binop (wmode, lshr_optab, gen_lowpart (wmode, op1),
                          x, NULL, 1, OPTAB_DIRECT);
      op2 = expand_binop (wmode, lshr_optab, gen_lowpart (wmode, op2),
                          x, NULL, 1, OPTAB_DIRECT);
      op1 = gen_lowpart (mode, op1);
      op2 = gen_lowpart (mode, op2);
    }

  if (mode == V8SImode)
    {
      if (uns_p)
        x = gen_vec_widen_umult_even_v8si (dest, op1, op2);
      else
        x = gen_vec_widen_smult_even_v8si (dest, op1, op2);
    }
  else if (uns_p)
    x = gen_vec_widen_umult_even_v4si (dest, op1, op2);
  else if (TARGET_SSE4_1)
    x = gen_sse4_1_mulv2siv2di3 (dest, op1, op2);
  else
    {
      rtx s1, s2, t0, t1, t2;

      /* The easiest way to implement this without PMULDQ is to go through
         the motions as if we are performing a full 64-bit multiply.  With
         the exception that we need to do less shuffling of the elements.  */

      /* Compute the sign-extension, aka highparts, of the two operands.  */
      s1 = ix86_expand_sse_cmp (gen_reg_rtx (mode), GT, CONST0_RTX (mode),
                                op1, pc_rtx, pc_rtx);
      s2 = ix86_expand_sse_cmp (gen_reg_rtx (mode), GT, CONST0_RTX (mode),
                                op2, pc_rtx, pc_rtx);

      /* Multiply LO(A) * HI(B), and vice-versa.  */
      t1 = gen_reg_rtx (wmode);
      t2 = gen_reg_rtx (wmode);
      emit_insn (gen_vec_widen_umult_even_v4si (t1, s1, op2));
      emit_insn (gen_vec_widen_umult_even_v4si (t2, s2, op1));

      /* Multiply LO(A) * LO(B).  */
      t0 = gen_reg_rtx (wmode);
      emit_insn (gen_vec_widen_umult_even_v4si (t0, op1, op2));

      /* Combine and shift the highparts into place.  */
      t1 = expand_binop (wmode, add_optab, t1, t2, t1, 1, OPTAB_DIRECT);
      t1 = expand_binop (wmode, ashl_optab, t1, GEN_INT (32), t1,
                         1, OPTAB_DIRECT);

      /* Combine high and low parts.  */
      force_expand_binop (wmode, add_optab, t0, t1, dest, 1, OPTAB_DIRECT);
      return;
    }
  emit_insn (x);
}
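
/* Illustrative derivation (added commentary, assuming 32-bit elements):
   modulo 2^64,
     signed(a) * signed(b)
       = unsigned(a) * unsigned(b)
         - 2^32 * (unsigned(b) * [a < 0] + unsigned(a) * [b < 0]).
   s1 and s2 above are the all-ones masks for a < 0 resp. b < 0, so
   t1 + t2 equals (2^32 - 1) times the correction sum; after the 32-bit
   left shift only its low 32 bits survive, which are exactly the negated
   correction, so the final addition implements the subtraction.  */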
void
ix86_expand_mul_widen_hilo (rtx dest, rtx op1, rtx op2,
                            bool uns_p, bool high_p)
{
  enum machine_mode wmode = GET_MODE (dest);
  enum machine_mode mode = GET_MODE (op1);
  rtx t1, t2, t3, t4, mask;

  switch (mode)
    {
    case V4SImode:
      t1 = gen_reg_rtx (mode);
      t2 = gen_reg_rtx (mode);
      if (TARGET_XOP && !uns_p)
        {
          /* With XOP, we have pmacsdqh, aka mul_widen_odd.  In this case,
             shuffle the elements once so that all elements are in the right
             place for immediate use: { A C B D }.  */
          emit_insn (gen_sse2_pshufd_1 (t1, op1, const0_rtx, const2_rtx,
                                        const1_rtx, GEN_INT (3)));
          emit_insn (gen_sse2_pshufd_1 (t2, op2, const0_rtx, const2_rtx,
                                        const1_rtx, GEN_INT (3)));
        }
      else
        {
          /* Put the elements into place for the multiply.  */
          ix86_expand_vec_interleave (t1, op1, op1, high_p);
          ix86_expand_vec_interleave (t2, op2, op2, high_p);
          high_p = false;
        }
      ix86_expand_mul_widen_evenodd (dest, t1, t2, uns_p, high_p);
      break;

    case V8SImode:
      /* Shuffle the elements between the lanes.  After this we
         have { A B E F | C D G H } for each operand.  */
      t1 = gen_reg_rtx (V4DImode);
      t2 = gen_reg_rtx (V4DImode);
      emit_insn (gen_avx2_permv4di_1 (t1, gen_lowpart (V4DImode, op1),
                                      const0_rtx, const2_rtx,
                                      const1_rtx, GEN_INT (3)));
      emit_insn (gen_avx2_permv4di_1 (t2, gen_lowpart (V4DImode, op2),
                                      const0_rtx, const2_rtx,
                                      const1_rtx, GEN_INT (3)));

      /* Shuffle the elements within the lanes.  After this we
         have { A A B B | C C D D } or { E E F F | G G H H }.  */
      t3 = gen_reg_rtx (V8SImode);
      t4 = gen_reg_rtx (V8SImode);
      mask = GEN_INT (high_p
                      ? 2 + (2 << 2) + (3 << 4) + (3 << 6)
                      : 0 + (0 << 2) + (1 << 4) + (1 << 6));
      emit_insn (gen_avx2_pshufdv3 (t3, gen_lowpart (V8SImode, t1), mask));
      emit_insn (gen_avx2_pshufdv3 (t4, gen_lowpart (V8SImode, t2), mask));

      ix86_expand_mul_widen_evenodd (dest, t3, t4, uns_p, false);
      break;

    case V8HImode:
    case V16HImode:
      t1 = expand_binop (mode, smul_optab, op1, op2, NULL_RTX,
                         uns_p, OPTAB_DIRECT);
      t2 = expand_binop (mode,
                         uns_p ? umul_highpart_optab : smul_highpart_optab,
                         op1, op2, NULL_RTX, uns_p, OPTAB_DIRECT);
      gcc_assert (t1 && t2);

      ix86_expand_vec_interleave (gen_lowpart (mode, dest), t1, t2, high_p);
      break;

    case V16QImode:
    case V32QImode:
      t1 = gen_reg_rtx (wmode);
      t2 = gen_reg_rtx (wmode);
      ix86_expand_sse_unpack (t1, op1, uns_p, high_p);
      ix86_expand_sse_unpack (t2, op2, uns_p, high_p);

      emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_MULT (wmode, t1, t2)));
      break;

    default:
      gcc_unreachable ();
    }
}
void
ix86_expand_sse2_mulv4si3 (rtx op0, rtx op1, rtx op2)
{
  rtx res_1, res_2;

  res_1 = gen_reg_rtx (V4SImode);
  res_2 = gen_reg_rtx (V4SImode);
  ix86_expand_mul_widen_evenodd (gen_lowpart (V2DImode, res_1),
				 op1, op2, true, false);
  ix86_expand_mul_widen_evenodd (gen_lowpart (V2DImode, res_2),
				 op1, op2, true, true);

  /* Move the results in element 2 down to element 1; we don't care
     what goes in elements 2 and 3.  Then we can merge the parts
     back together with an interleave.

     Note that two other sequences were tried:
     (1) Use interleaves at the start instead of psrldq, which allows
     us to use a single shufps to merge things back at the end.
     (2) Use shufps here to combine the two vectors, then pshufd to
     put the elements in the correct order.
     In both cases the cost of the reformatting stall was too high
     and the overall sequence slower.  */

  emit_insn (gen_sse2_pshufd_1 (res_1, res_1, const0_rtx, const2_rtx,
				const0_rtx, const0_rtx));
  emit_insn (gen_sse2_pshufd_1 (res_2, res_2, const0_rtx, const2_rtx,
				const0_rtx, const0_rtx));
  res_1 = emit_insn (gen_vec_interleave_lowv4si (op0, res_1, res_2));
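  /* RES_1 is now the interleave insn itself; give it a REG_EQUAL note
     saying that OP0 equals a plain V4SImode multiply, so that later
     passes such as combine and CSE can simplify or reuse the result
     instead of re-deriving it from the whole shuffle sequence.  */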
  set_unique_reg_note (res_1, REG_EQUAL, gen_rtx_MULT (V4SImode, op1, op2));
}
void
ix86_expand_sse2_mulvxdi3 (rtx op0, rtx op1, rtx op2)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx t1, t2, t3, t4, t5, t6;

  if (TARGET_XOP && mode == V2DImode)
    {
      /* op1: A,B,C,D, op2: E,F,G,H */
      op1 = gen_lowpart (V4SImode, op1);
      op2 = gen_lowpart (V4SImode, op2);

      t1 = gen_reg_rtx (V4SImode);
      t2 = gen_reg_rtx (V4SImode);
      t3 = gen_reg_rtx (V2DImode);
      t4 = gen_reg_rtx (V2DImode);

      /* t1: B,A,D,C */
      emit_insn (gen_sse2_pshufd_1 (t1, op1,
				    GEN_INT (1),
				    GEN_INT (0),
				    GEN_INT (3),
				    GEN_INT (2)));

      /* t2: (B*E),(A*F),(D*G),(C*H) */
      emit_insn (gen_mulv4si3 (t2, t1, op2));

      /* t3: (B*E)+(A*F), (D*G)+(C*H) */
      emit_insn (gen_xop_phadddq (t3, t2));

      /* t4: ((B*E)+(A*F))<<32, ((D*G)+(C*H))<<32 */
      emit_insn (gen_ashlv2di3 (t4, t3, GEN_INT (32)));

      /* op0: (((B*E)+(A*F))<<32)+(B*F), (((D*G)+(C*H))<<32)+(D*H) */
      emit_insn (gen_xop_pmacsdql (op0, op1, op2, t4));
    }
  else
    {
      enum machine_mode nmode;
      rtx (*umul) (rtx, rtx, rtx);

      if (mode == V2DImode)
	{
	  umul = gen_vec_widen_umult_even_v4si;
	  nmode = V4SImode;
	}
      else if (mode == V4DImode)
	{
	  umul = gen_vec_widen_umult_even_v8si;
	  nmode = V8SImode;
	}
      else
	gcc_unreachable ();
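      /* Writing A = AH*2^32 + AL and B = BH*2^32 + BL, the low 64 bits
	 of the product are
	   A*B mod 2^64 = AL*BL + ((AL*BH + AH*BL) << 32)
	 because the AH*BH term lands entirely above bit 63.  The code
	 below forms exactly these three partial products with the
	 even-element unsigned widening multiply.  */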
      /* Multiply low parts.  */
      t1 = gen_reg_rtx (mode);
      emit_insn (umul (t1, gen_lowpart (nmode, op1),
		       gen_lowpart (nmode, op2)));

      /* Shift input vectors right 32 bits so we can multiply high parts.  */
      t6 = GEN_INT (32);
      t2 = expand_binop (mode, lshr_optab, op1, t6, NULL, 1, OPTAB_DIRECT);
      t3 = expand_binop (mode, lshr_optab, op2, t6, NULL, 1, OPTAB_DIRECT);

      /* Multiply high parts by low parts.  */
      t4 = gen_reg_rtx (mode);
      t5 = gen_reg_rtx (mode);
      emit_insn (umul (t4, gen_lowpart (nmode, t2),
		       gen_lowpart (nmode, op2)));
      emit_insn (umul (t5, gen_lowpart (nmode, t3),
		       gen_lowpart (nmode, op1)));

      /* Combine and shift the highparts back.  */
      t4 = expand_binop (mode, add_optab, t4, t5, t4, 1, OPTAB_DIRECT);
      t4 = expand_binop (mode, ashl_optab, t4, t6, t4, 1, OPTAB_DIRECT);

      /* Combine high and low parts.  */
      force_expand_binop (mode, add_optab, t1, t4, op0, 1, OPTAB_DIRECT);
    }

  set_unique_reg_note (get_last_insn (), REG_EQUAL,
		       gen_rtx_MULT (mode, op1, op2));
}
/* Expand an insert into a vector register through pinsr insn.
   Return true if successful.  */

bool
ix86_expand_pinsr (rtx *operands)
{
  rtx dst = operands[0];
  rtx src = operands[3];

  unsigned int size = INTVAL (operands[1]);
  unsigned int pos = INTVAL (operands[2]);

  if (GET_CODE (dst) == SUBREG)
    {
      pos += SUBREG_BYTE (dst) * BITS_PER_UNIT;
      dst = SUBREG_REG (dst);
    }

  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);

  switch (GET_MODE (dst))
    {
    case V16QImode:
    case V8HImode:
    case V4SImode:
    case V2DImode:
      {
	enum machine_mode srcmode, dstmode;
	rtx (*pinsr)(rtx, rtx, rtx, rtx);

	srcmode = mode_for_size (size, MODE_INT, 0);

	switch (srcmode)
	  {
	  case QImode:
	    if (!TARGET_SSE4_1)
	      return false;
	    dstmode = V16QImode;
	    pinsr = gen_sse4_1_pinsrb;
	    break;

	  case HImode:
	    dstmode = V8HImode;
	    pinsr = gen_sse2_pinsrw;
	    break;

	  case SImode:
	    if (!TARGET_SSE4_1)
	      return false;
	    dstmode = V4SImode;
	    pinsr = gen_sse4_1_pinsrd;
	    break;

	  case DImode:
	    gcc_assert (TARGET_64BIT);
	    if (!TARGET_SSE4_1)
	      return false;
	    dstmode = V2DImode;
	    pinsr = gen_sse4_1_pinsrq;
	    break;

	  default:
	    return false;
	  }
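	/* POS arrives in bits; dividing by the field width below turns
	   it into an element index, which the pinsr patterns encode as
	   a one-bit vec_merge mask, hence GEN_INT (1 << pos).  */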
	dst = gen_lowpart (dstmode, dst);
	src = gen_lowpart (srcmode, src);

	pos /= size;

	emit_insn (pinsr (dst, dst, src, GEN_INT (1 << pos)));
	return true;
      }

    default:
      return false;
    }
}
/* This function returns the calling ABI specific va_list type node.
   It returns the FNDECL specific va_list type.  */

static tree
ix86_fn_abi_va_list (tree fndecl)
{
  if (!TARGET_64BIT)
    return va_list_type_node;
  gcc_assert (fndecl != NULL_TREE);

  if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
    return ms_va_list_type_node;
  else
    return sysv_va_list_type_node;
}
/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */

static tree
ix86_canonical_va_list_type (tree type)
{
  tree wtype, htype;

  /* Resolve references and pointers to va_list type.  */
  if (TREE_CODE (type) == MEM_REF)
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type)
	   && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  if (TARGET_64BIT && va_list_type_node != NULL_TREE)
    {
      wtype = va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return va_list_type_node;

      wtype = sysv_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return sysv_va_list_type_node;

      wtype = ms_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return ms_va_list_type_node;

      return NULL_TREE;
    }

  return std_canonical_va_list_type (type);
}
/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the result type of
   the va_list builtin, and *PNAME to its internal type.
   Returns zero if there is no element for this index, otherwise
   IDX should be increased upon the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (TARGET_64BIT)
    {
      switch (idx)
	{
	default:
	  break;

	case 0:
	  *ptree = ms_va_list_type_node;
	  *pname = "__builtin_ms_va_list";
	  return 1;

	case 1:
	  *ptree = sysv_va_list_type_node;
	  *pname = "__builtin_sysv_va_list";
	  return 1;
	}
    }

  return 0;
}
#undef TARGET_SCHED_DISPATCH
#define TARGET_SCHED_DISPATCH has_dispatch
#undef TARGET_SCHED_DISPATCH_DO
#define TARGET_SCHED_DISPATCH_DO do_dispatch
#undef TARGET_SCHED_REASSOCIATION_WIDTH
#define TARGET_SCHED_REASSOCIATION_WIDTH ix86_reassociation_width
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ix86_sched_reorder
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY ix86_adjust_priority
#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK \
  ix86_dependencies_evaluation_hook

/* The size of the dispatch window is the total number of bytes of
   object code allowed in a window.  */
#define DISPATCH_WINDOW_SIZE 16

/* Number of dispatch windows considered for scheduling.  */
#define MAX_DISPATCH_WINDOWS 3

/* Maximum number of instructions in a window.  */
#define MAX_INSN 4

/* Maximum number of immediate operands in a window.  */
#define MAX_IMM 4

/* Maximum number of immediate bits allowed in a window.  */
#define MAX_IMM_SIZE 128

/* Maximum number of 32 bit immediates allowed in a window.  */
#define MAX_IMM_32 4

/* Maximum number of 64 bit immediates allowed in a window.  */
#define MAX_IMM_64 2

/* Maximum total of loads or prefetches allowed in a window.  */
#define MAX_LOAD 2

/* Maximum total of stores allowed in a window.  */
#define MAX_STORE 1

#undef BIG
#define BIG 100
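/* Note how the immediate limits interlock: MAX_IMM_SIZE is 128 bits,
   which is exactly MAX_IMM_32 (4) 32-bit immediates or MAX_IMM_64 (2)
   64-bit ones.  */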
/* Dispatch groups.  Instructions that affect the mix in a dispatch window.  */
enum dispatch_group {
  disp_no_group = 0,
  disp_load,
  disp_store,
  disp_load_store,
  disp_prefetch,
  disp_imm,
  disp_imm_32,
  disp_imm_64,
  disp_branch,
  disp_cmp,
  disp_jcc,
  disp_last
};

/* Number of allowable groups in a dispatch window.  It is an array
   indexed by dispatch_group enum.  100 is used as a big number,
   because the number of these kinds of operations does not have any
   effect in a dispatch window, but we need them for other reasons in
   the table.  */
static unsigned int num_allowable_groups[disp_last] = {
  0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
};

char group_name[disp_last + 1][16] = {
  "disp_no_group", "disp_load", "disp_store", "disp_load_store",
  "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
  "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
};

/* Instruction path.  */
enum insn_path
{
  no_path = 0,
  path_single, /* Single micro op.  */
  path_double, /* Double micro op.  */
  path_multi,  /* Instructions with more than 2 micro ops.  */
  last_path
};
/* sched_insn_info defines a window to the instructions scheduled in
   the basic block.  It contains a pointer to the insn_info table and
   the instruction scheduled.

   Windows are allocated for each basic block and are linked
   together.  */
typedef struct sched_insn_info_s {
  rtx insn;
  enum dispatch_group group;
  enum insn_path path;
  int byte_len;
  int imm_bytes;
} sched_insn_info;

/* Linked list of dispatch windows.  This is a two way list of
   dispatch windows of a basic block.  It contains information about
   the number of uops in the window and the total number of
   instructions and of bytes in the object code for this dispatch
   window.  */
typedef struct dispatch_windows_s {
  int num_insn;		   /* Number of insns in the window.  */
  int num_uops;		   /* Number of uops in the window.  */
  int window_size;	   /* Number of bytes in the window.  */
  int window_num;	   /* Window number, 0 or 1.  */
  int num_imm;		   /* Number of immediates in an insn.  */
  int num_imm_32;	   /* Number of 32 bit immediates in an insn.  */
  int num_imm_64;	   /* Number of 64 bit immediates in an insn.  */
  int imm_size;		   /* Total immediates in the window.  */
  int num_loads;	   /* Total memory loads in the window.  */
  int num_stores;	   /* Total memory stores in the window.  */
  int violation;	   /* Violation exists in window.  */
  sched_insn_info *window; /* Pointer to the window.  */
  struct dispatch_windows_s *next;
  struct dispatch_windows_s *prev;
} dispatch_windows;

/* Immediate values used in an insn.  */
typedef struct imm_info_s
{
  int imm;
  int imm32;
  int imm64;
} imm_info;

static dispatch_windows *dispatch_window_list;
static dispatch_windows *dispatch_window_list1;
/* Get dispatch group of insn.  */

static enum dispatch_group
get_mem_group (rtx insn)
{
  enum attr_memory memory;

  if (INSN_CODE (insn) < 0)
    return disp_no_group;
  memory = get_attr_memory (insn);
  if (memory == MEMORY_STORE)
    return disp_store;

  if (memory == MEMORY_LOAD)
    return disp_load;

  if (memory == MEMORY_BOTH)
    return disp_load_store;

  return disp_no_group;
}

/* Return true if insn is a compare instruction.  */

static bool
is_cmp (rtx insn)
{
  enum attr_type type;

  type = get_attr_type (insn);
  return (type == TYPE_TEST
	  || type == TYPE_ICMP
	  || type == TYPE_FCMP
	  || GET_CODE (PATTERN (insn)) == COMPARE);
}

/* Return true if a dispatch violation was encountered.  */

static bool
dispatch_violation (void)
{
  if (dispatch_window_list->next)
    return dispatch_window_list->next->violation;
  return dispatch_window_list->violation;
}

/* Return true if insn is a branch instruction.  */

static bool
is_branch (rtx insn)
{
  return (CALL_P (insn) || JUMP_P (insn));
}

/* Return true if insn is a prefetch instruction.  */

static bool
is_prefetch (rtx insn)
{
  return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
}
/* This function initializes a dispatch window and the list container holding a
   pointer to the window.  */

static void
init_window (int window_num)
{
  int i;
  dispatch_windows *new_list;

  if (window_num == 0)
    new_list = dispatch_window_list;
  else
    new_list = dispatch_window_list1;

  new_list->num_insn = 0;
  new_list->num_uops = 0;
  new_list->window_size = 0;
  new_list->next = NULL;
  new_list->prev = NULL;
  new_list->window_num = window_num;
  new_list->num_imm = 0;
  new_list->num_imm_32 = 0;
  new_list->num_imm_64 = 0;
  new_list->imm_size = 0;
  new_list->num_loads = 0;
  new_list->num_stores = 0;
  new_list->violation = false;

  for (i = 0; i < MAX_INSN; i++)
    {
      new_list->window[i].insn = NULL;
      new_list->window[i].group = disp_no_group;
      new_list->window[i].path = no_path;
      new_list->window[i].byte_len = 0;
      new_list->window[i].imm_bytes = 0;
    }
  return;
}
/* This function allocates and initializes a dispatch window and the
   list container holding a pointer to the window.  */

static dispatch_windows *
allocate_window (void)
{
  dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
  new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);

  return new_list;
}
/* This routine initializes the dispatch scheduling information.  It
   initiates building dispatch scheduler tables and constructs the
   first dispatch window.  */

static void
init_dispatch_sched (void)
{
  /* Allocate a dispatch list and a window.  */
  dispatch_window_list = allocate_window ();
  dispatch_window_list1 = allocate_window ();
  init_window (0);
  init_window (1);
}
/* This function returns true if a branch is detected.  End of a basic block
   does not have to be a branch, but here we assume only branches end a
   window.  */

static bool
is_end_basic_block (enum dispatch_group group)
{
  return group == disp_branch;
}
/* This function is called when the end of a window processing is reached.  */

static void
process_end_window (void)
{
  gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
  if (dispatch_window_list->next)
    {
      gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
      gcc_assert (dispatch_window_list->window_size
		  + dispatch_window_list1->window_size <= 48);
      init_window (1);
    }
  init_window (0);
}
/* Allocates a new dispatch window and adds it to WINDOW_LIST.
   WINDOW_NUM is either 0 or 1.  A maximum of two windows are generated
   for 48 bytes of instructions.  Note that these windows are not the
   dispatch windows whose size is DISPATCH_WINDOW_SIZE.  */

static dispatch_windows *
allocate_next_window (int window_num)
{
  if (window_num == 0)
    {
      if (dispatch_window_list->next)
	init_window (1);
      init_window (0);
      return dispatch_window_list;
    }

  dispatch_window_list->next = dispatch_window_list1;
  dispatch_window_list1->prev = dispatch_window_list;

  return dispatch_window_list1;
}
/* Increment the number of immediate operands of an instruction.  */

static int
find_constant_1 (rtx *in_rtx, imm_info *imm_values)
{
  if (*in_rtx == 0)
    return 0;

  switch ( GET_CODE (*in_rtx))
    {
    case CONST:
    case SYMBOL_REF:
    case CONST_INT:
      (imm_values->imm)++;
      if (x86_64_immediate_operand (*in_rtx, SImode))
	(imm_values->imm32)++;
      else
	(imm_values->imm64)++;
      break;

    case CONST_DOUBLE:
      (imm_values->imm)++;
      (imm_values->imm64)++;
      break;

    case CODE_LABEL:
      if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
	{
	  (imm_values->imm)++;
	  (imm_values->imm32)++;
	}
      break;

    default:
      break;
    }

  return 0;
}
/* Compute number of immediate operands of an instruction.  */

static void
find_constant (rtx in_rtx, imm_info *imm_values)
{
  for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
		(rtx_function) find_constant_1, (void *) imm_values);
}
/* Return total size of immediate operands of an instruction along with number
   of corresponding immediate-operands.  It initializes its parameters to zero
   before calling FIND_CONSTANT.
   INSN is the input instruction.  IMM is the total of immediates.
   IMM32 is the number of 32 bit immediates.  IMM64 is the number of 64
   bit immediates.  */
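/* For example, a move of a constant needing more than 32 bits counts
   as one 64-bit immediate and adds 8 bytes to the returned size,
   while "addl $42, %eax" counts as one 32-bit immediate and adds 4;
   x86_64_immediate_operand decides which bucket applies.  */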
static int
get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
{
  imm_info imm_values = {0, 0, 0};

  find_constant (insn, &imm_values);
  *imm = imm_values.imm;
  *imm32 = imm_values.imm32;
  *imm64 = imm_values.imm64;
  return imm_values.imm32 * 4 + imm_values.imm64 * 8;
}
/* This function indicates if an operand of an instruction is an
   immediate.  */

static bool
has_immediate (rtx insn)
{
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (insn)
    return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
			       &num_imm64_operand);
  return false;
}
/* Return single or double path for instructions.  */

static enum insn_path
get_insn_path (rtx insn)
{
  enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);

  if ((int) path == 0)
    return path_single;

  if ((int) path == 1)
    return path_double;

  return path_multi;
}
/* Return insn dispatch group.  */

static enum dispatch_group
get_insn_group (rtx insn)
{
  enum dispatch_group group = get_mem_group (insn);
  if (group)
    return group;

  if (is_branch (insn))
    return disp_branch;

  if (is_cmp (insn))
    return disp_cmp;

  if (has_immediate (insn))
    return disp_imm;

  if (is_prefetch (insn))
    return disp_prefetch;

  return disp_no_group;
}
/* Count number of GROUP restricted instructions in a dispatch
   window WINDOW_LIST.  */

static int
count_num_restricted (rtx insn, dispatch_windows *window_list)
{
  enum dispatch_group group = get_insn_group (insn);
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (group == disp_no_group)
    return 0;

  if (group == disp_imm)
    {
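      /* The window immediate budget: at most MAX_IMM immediates and
	 MAX_IMM_SIZE total bits, where a 64-bit immediate also uses up
	 two of the MAX_IMM_32 32-bit slots and vice versa; the cross
	 terms below enforce exactly that.  */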
      imm_size = get_num_immediates (insn, &num_imm_operand,
				     &num_imm32_operand, &num_imm64_operand);
      if (window_list->imm_size + imm_size > MAX_IMM_SIZE
	  || num_imm_operand + window_list->num_imm > MAX_IMM
	  || (num_imm32_operand > 0
	      && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
		  || window_list->num_imm_64 * 2 + num_imm32_operand
		     > MAX_IMM_32))
	  || (num_imm64_operand > 0
	      && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
		  || window_list->num_imm_32 + num_imm64_operand * 2
		     > MAX_IMM_32))
	  || (window_list->imm_size + imm_size == MAX_IMM_SIZE
	      && num_imm64_operand > 0
	      && ((window_list->num_imm_64 > 0
		   && window_list->num_insn >= 2)
		  || window_list->num_insn >= 3)))
	return BIG;

      return 1;
    }

  if ((group == disp_load_store
       && (window_list->num_loads >= MAX_LOAD
	   || window_list->num_stores >= MAX_STORE))
      || ((group == disp_load
	   || group == disp_prefetch)
	  && window_list->num_loads >= MAX_LOAD)
      || (group == disp_store
	  && window_list->num_stores >= MAX_STORE))
    return BIG;

  return 1;
}
/* This function returns true if insn satisfies dispatch rules on the
   last window scheduled.  */

static bool
fits_dispatch_window (rtx insn)
{
  dispatch_windows *window_list = dispatch_window_list;
  dispatch_windows *window_list_next = dispatch_window_list->next;
  unsigned int num_restrict;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int sum;

  /* Make disp_cmp and disp_jcc get scheduled at the latest.  These
     instructions should be given the lowest priority in the
     scheduling process in the Haifa scheduler to make sure they will
     be scheduled in the same dispatch window as the reference to
     them.  */
  if (group == disp_jcc || group == disp_cmp)
    return false;

  /* Check nonrestricted.  */
  if (group == disp_no_group || group == disp_branch)
    return true;

  /* Get last dispatch window.  */
  if (window_list_next)
    window_list = window_list_next;

  if (window_list->window_num == 1)
    {
      sum = window_list->prev->window_size + window_list->window_size;

      if (sum == 32
	  || (min_insn_size (insn) + sum) >= 48)
	/* Window 1 is full.  Go for next window.  */
	return true;
    }

  num_restrict = count_num_restricted (insn, window_list);

  if (num_restrict > num_allowable_groups[group])
    return false;

  /* See if it fits in the first window.  */
  if (window_list->window_num == 0)
    {
      /* The first window should have only single and double path
	 uops.  */
      if (path == path_double
	  && (window_list->num_uops + 2) > MAX_INSN)
	return false;
      else if (path != path_single)
	return false;
    }
  return true;
}
/* Add an instruction INSN with NUM_UOPS micro-operations to the
   dispatch window WINDOW_LIST.  */

static void
add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
{
  int byte_len = min_insn_size (insn);
  int num_insn = window_list->num_insn;
  int imm_size;
  sched_insn_info *window = window_list->window;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (!window_list->violation && group != disp_cmp
      && !fits_dispatch_window (insn))
    window_list->violation = true;

  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
				 &num_imm64_operand);

  /* Initialize window with new instruction.  */
  window[num_insn].insn = insn;
  window[num_insn].byte_len = byte_len;
  window[num_insn].group = group;
  window[num_insn].path = path;
  window[num_insn].imm_bytes = imm_size;

  window_list->window_size += byte_len;
  window_list->num_insn = num_insn + 1;
  window_list->num_uops = window_list->num_uops + num_uops;
  window_list->imm_size += imm_size;
  window_list->num_imm += num_imm_operand;
  window_list->num_imm_32 += num_imm32_operand;
  window_list->num_imm_64 += num_imm64_operand;

  if (group == disp_store)
    window_list->num_stores += 1;
  else if (group == disp_load
	   || group == disp_prefetch)
    window_list->num_loads += 1;
  else if (group == disp_load_store)
    {
      window_list->num_stores += 1;
      window_list->num_loads += 1;
    }
}
/* Adds a scheduled instruction, INSN, to the current dispatch window.
   If the total bytes of instructions or the number of instructions in
   the window exceed what is allowable, it allocates a new window.  */

static void
add_to_dispatch_window (rtx insn)
{
  int byte_len;
  dispatch_windows *window_list;
  dispatch_windows *next_list;
  dispatch_windows *window0_list;
  enum insn_path path;
  enum dispatch_group insn_group;
  bool insn_fits;
  int num_insn;
  int num_uops;
  int window_num;
  int insn_num_uops;
  int sum;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  window_list = dispatch_window_list;
  next_list = window_list->next;
  path = get_insn_path (insn);
  insn_group = get_insn_group (insn);

  /* Get the last dispatch window.  */
  if (next_list)
    window_list = dispatch_window_list->next;

  if (path == path_single)
    insn_num_uops = 1;
  else if (path == path_double)
    insn_num_uops = 2;
  else
    insn_num_uops = (int) path;

  /* If the current window is full, get a new window.
     Window number zero is full if MAX_INSN uops are scheduled in it.
     Window number one is full if window zero's bytes plus window
     one's bytes is 32, or if the bytes of the new instruction added
     to the total makes it greater than 48, or it already has MAX_INSN
     instructions in it.  */
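  /* For example, four single-path insns exhaust window zero's uop
     budget; the fifth flips WINDOW_NUM to one and continues there
     until the 32/48 byte limits trip and process_end_window resets
     both windows.  */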
  num_insn = window_list->num_insn;
  num_uops = window_list->num_uops;
  window_num = window_list->window_num;
  insn_fits = fits_dispatch_window (insn);

  if (num_insn >= MAX_INSN
      || num_uops + insn_num_uops > MAX_INSN
      || !(insn_fits))
    {
      window_num = ~window_num & 1;
      window_list = allocate_next_window (window_num);
    }

  if (window_num == 0)
    {
      add_insn_window (insn, window_list, insn_num_uops);
      if (window_list->num_insn >= MAX_INSN
	  && insn_group == disp_branch)
	{
	  process_end_window ();
	  return;
	}
    }
  else if (window_num == 1)
    {
      window0_list = window_list->prev;
      sum = window0_list->window_size + window_list->window_size;
      if (sum == 32
	  || (byte_len + sum) >= 48)
	{
	  process_end_window ();
	  window_list = dispatch_window_list;
	}

      add_insn_window (insn, window_list, insn_num_uops);
    }
  else
    gcc_unreachable ();

  if (is_end_basic_block (insn_group))
    {
      /* End of basic block is reached, do end-basic-block process.  */
      process_end_window ();
      return;
    }
}
/* Print the dispatch window, WINDOW_NUM, to FILE.  */

DEBUG_FUNCTION static void
debug_dispatch_window_file (FILE *file, int window_num)
{
  dispatch_windows *list;
  int i;

  if (window_num == 0)
    list = dispatch_window_list;
  else
    list = dispatch_window_list1;

  fprintf (file, "Window #%d:\n", list->window_num);
  fprintf (file, "  num_insn = %d, num_uops = %d, window_size = %d\n",
	   list->num_insn, list->num_uops, list->window_size);
  fprintf (file,
	   "  num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
	   list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);

  fprintf (file, "  num_loads = %d, num_stores = %d\n", list->num_loads,
	   list->num_stores);
  fprintf (file, " insn info:\n");

  for (i = 0; i < MAX_INSN; i++)
    {
      if (!list->window[i].insn)
	break;
      fprintf (file,
	       "    group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
	       i, group_name[list->window[i].group],
	       i, (void *) list->window[i].insn,
	       i, list->window[i].path,
	       i, list->window[i].byte_len,
	       i, list->window[i].imm_bytes);
    }
}
/* Print to stdout a dispatch window.  */

DEBUG_FUNCTION void
debug_dispatch_window (int window_num)
{
  debug_dispatch_window_file (stdout, window_num);
}
/* Print INSN dispatch information to FILE.  */

DEBUG_FUNCTION static void
debug_insn_dispatch_info_file (FILE *file, rtx insn)
{
  int byte_len;
  enum insn_path path;
  enum dispatch_group group;
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  path = get_insn_path (insn);
  group = get_insn_group (insn);
  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
				 &num_imm64_operand);

  fprintf (file, " insn info:\n");
  fprintf (file, "  group = %s, path = %d, byte_len = %d\n",
	   group_name[group], path, byte_len);
  fprintf (file,
	   "  num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
	   num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
}
/* Print to stdout the status of the ready list with respect to
   dispatch windows.  */

DEBUG_FUNCTION void
debug_ready_dispatch (void)
{
  int i;
  int no_ready = number_in_ready ();

  fprintf (stdout, "Number of ready: %d\n", no_ready);

  for (i = 0; i < no_ready; i++)
    debug_insn_dispatch_info_file (stdout, get_ready_element (i));
}
/* This routine is the driver of the dispatch scheduler.  */

static void
do_dispatch (rtx insn, int mode)
{
  if (mode == DISPATCH_INIT)
    init_dispatch_sched ();
  else if (mode == ADD_TO_DISPATCH_WINDOW)
    add_to_dispatch_window (insn);
}
/* Return TRUE if Dispatch Scheduling is supported.  */

static bool
has_dispatch (rtx insn, int action)
{
  if ((TARGET_BDVER1 || TARGET_BDVER2 || TARGET_BDVER3)
      && flag_dispatch_scheduler)
    switch (action)
      {
      default:
	return false;

      case IS_DISPATCH_ON:
	return true;

      case IS_CMP:
	return is_cmp (insn);

      case DISPATCH_VIOLATION:
	return dispatch_violation ();

      case FITS_DISPATCH_WINDOW:
	return fits_dispatch_window (insn);
      }

  return false;
}
/* Implementation of the reassociation_width target hook used by the
   reassoc phase to identify the parallelism level in a reassociated
   tree.  The statement's tree_code is passed in OPC, the arguments'
   type in MODE.

   Currently parallel reassociation is enabled for Atom processors
   only and we set the reassociation width to be 2 because Atom may
   issue up to 2 instructions per cycle.

   The return value should be adjusted if parallel reassociation is
   enabled for other processors.  */

static int
ix86_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  int res = 1;

  if (INTEGRAL_MODE_P (mode) && TARGET_REASSOC_INT_TO_PARALLEL)
    res = 2;
  else if (FLOAT_MODE_P (mode) && TARGET_REASSOC_FP_TO_PARALLEL)
    res = 2;

  return res;
}
/* ??? No autovectorization into MMX or 3DNOW until we can reliably
   place emms and femms instructions.  */

static enum machine_mode
ix86_preferred_simd_mode (enum machine_mode mode)
{
  if (!TARGET_SSE)
    return word_mode;

  switch (mode)
    {
    case QImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V32QImode : V16QImode;
    case HImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V16HImode : V8HImode;
    case SImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V8SImode : V4SImode;
    case DImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V4DImode : V2DImode;

    case SFmode:
      if (TARGET_AVX && !TARGET_PREFER_AVX128)
	return V8SFmode;
      else
	return V4SFmode;

    case DFmode:
      if (!TARGET_VECTORIZE_DOUBLE)
	return word_mode;
      else if (TARGET_AVX && !TARGET_PREFER_AVX128)
	return V4DFmode;
      else if (TARGET_SSE2)
	return V2DFmode;
      /* FALLTHRU */

    default:
      return word_mode;
    }
}
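/* For example, with -mavx (and without -mprefer-avx128) SImode data
   is vectorized in V8SImode and plain SSE2 yields V4SImode, while
   DFmode falls back to word_mode when the TARGET_VECTORIZE_DOUBLE
   tuning flag is off.  */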
/* If AVX is enabled then try vectorizing with both 256bit and 128bit
   vectors.  */

static unsigned int
ix86_autovectorize_vector_sizes (void)
{
  return (TARGET_AVX && !TARGET_PREFER_AVX128) ? 32 | 16 : 0;
}
/* Return class of registers which could be used for pseudo of MODE
   and of class RCLASS for spilling instead of memory.  Return NO_REGS
   if it is not possible or not profitable.  */

static reg_class_t
ix86_spill_class (reg_class_t rclass, enum machine_mode mode)
{
  if (TARGET_SSE && TARGET_GENERAL_REGS_SSE_SPILL && ! TARGET_MMX
      && hard_reg_set_subset_p (reg_class_contents[rclass],
				reg_class_contents[GENERAL_REGS])
      && (mode == SImode || (TARGET_64BIT && mode == DImode)))
    return SSE_REGS;
  return NO_REGS;
}
/* Implement targetm.vectorize.init_cost.  */

static void *
ix86_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
{
  unsigned *cost = XNEWVEC (unsigned, 3);
  cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
  return cost;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
ix86_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
		    struct _stmt_vec_info *stmt_info, int misalign,
		    enum vect_cost_model_location where)
{
  unsigned *cost = (unsigned *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = ix86_builtin_vectorization_cost (kind, vectype,
						       misalign);

      /* Statements in an inner loop relative to the loop being
	 vectorized are weighted more heavily.  The value here is
	 arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
	count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost[where] += retval;
    }

  return retval;
}
/* Implement targetm.vectorize.finish_cost.  */

static void
ix86_finish_cost (void *data, unsigned *prologue_cost,
		  unsigned *body_cost, unsigned *epilogue_cost)
{
  unsigned *cost = (unsigned *) data;
  *prologue_cost = cost[vect_prologue];
  *body_cost = cost[vect_body];
  *epilogue_cost = cost[vect_epilogue];
}
/* Implement targetm.vectorize.destroy_cost_data.  */

static void
ix86_destroy_cost_data (void *data)
{
  free (data);
}
/* Validate target specific memory model bits in VAL.  */

static unsigned HOST_WIDE_INT
ix86_memmodel_check (unsigned HOST_WIDE_INT val)
{
  unsigned HOST_WIDE_INT model = val & MEMMODEL_MASK;
  unsigned HOST_WIDE_INT strong;

  if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
				      |MEMMODEL_MASK)
      || ((val & IX86_HLE_ACQUIRE) && (val & IX86_HLE_RELEASE)))
    {
      warning (OPT_Winvalid_memory_model,
	       "Unknown architecture specific memory model");
      return MEMMODEL_SEQ_CST;
    }
  strong = (model == MEMMODEL_ACQ_REL || model == MEMMODEL_SEQ_CST);
  if (val & IX86_HLE_ACQUIRE && !(model == MEMMODEL_ACQUIRE || strong))
    {
      warning (OPT_Winvalid_memory_model,
	       "HLE_ACQUIRE not used with ACQUIRE or stronger memory model");
      return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
    }
  if (val & IX86_HLE_RELEASE && !(model == MEMMODEL_RELEASE || strong))
    {
      warning (OPT_Winvalid_memory_model,
	       "HLE_RELEASE not used with RELEASE or stronger memory model");
      return MEMMODEL_SEQ_CST | IX86_HLE_RELEASE;
    }
  return val;
}
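/* For example, a caller requests lock elision with
     __atomic_exchange_n (&lock, 1, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
   combining an HLE bit with a weaker base model than it requires, or
   setting bits outside the known mask, is diagnosed above and forced
   back to a safe SEQ_CST variant.  */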
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
# undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_TM_LOAD
#define TARGET_VECTORIZE_BUILTIN_TM_LOAD ix86_builtin_tm_load

#undef TARGET_VECTORIZE_BUILTIN_TM_STORE
#define TARGET_VECTORIZE_BUILTIN_TM_STORE ix86_builtin_tm_store

#undef TARGET_VECTORIZE_BUILTIN_GATHER
#define TARGET_VECTORIZE_BUILTIN_GATHER ix86_vectorize_builtin_gather

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_PROFILE_BEFORE_PROLOGUE
#define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue

#undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
#define TARGET_MANGLE_DECL_ASSEMBLER_NAME ix86_mangle_decl_assembler_name

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND ix86_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#undef TARGET_MEMMODEL_CHECK
#define TARGET_MEMMODEL_CHECK ix86_memmodel_check

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ix86_option_override

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN ix86_fold_builtin

#undef TARGET_COMPARE_VERSION_PRIORITY
#define TARGET_COMPARE_VERSION_PRIORITY ix86_compare_version_priority

#undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
#define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
  ix86_generate_version_dispatcher_body

#undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
#define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
  ix86_get_function_versions_dispatcher

#undef TARGET_ENUM_VA_LIST_P
#define TARGET_ENUM_VA_LIST_P ix86_enum_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ix86_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS ix86_return_pops_args

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN ix86_legitimate_combined_insn

#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET ix86_asan_shadow_offset

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
#endif

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE ix86_promote_function_mode

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK ix86_member_type_forces_blk

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS ix86_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS ix86_class_max_nregs

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
  ix86_vectorize_vec_perm_const_ok
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  ix86_preferred_simd_mode
#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  ix86_autovectorize_vector_sizes
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST ix86_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST ix86_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST ix86_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA ix86_destroy_cost_data

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_OPTION_FUNCTION_VERSIONS
#define TARGET_OPTION_FUNCTION_VERSIONS ix86_function_versions

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_true

#undef TARGET_REGISTER_PRIORITY
#define TARGET_REGISTER_PRIORITY ix86_register_priority

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P ix86_legitimate_constant_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage

#if TARGET_MACHO
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS darwin_rename_builtins
#endif

#undef TARGET_SPILL_CLASS
#define TARGET_SPILL_CLASS ix86_spill_class

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"